
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
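
; The RUN line is consumed by LLVM's lit harness: %s expands to this file, so
; the IR below is piped through instcombine and the output is matched against
; the CHECK lines. A manual invocation would look roughly like this (the
; build-tree paths and file name are assumptions):
;   build/bin/opt -passes=instcombine -S < saturating-add-sub.ll \
;     | build/bin/FileCheck saturating-add-sub.ll
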
; CHECK-LABEL: @test_scalar_uadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    ret i8 [[X]]
; CHECK-LABEL: @test_vector_uadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    ret <2 x i8> [[X]]
; CHECK-LABEL: @test_scalar_sadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
; CHECK-NEXT:    ret i8 [[X]]
  %x = call i8 @llvm.sadd.sat.i8(i8 -10, i8 %a)
; CHECK-LABEL: @test_vector_sadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 -20>)
; CHECK-NEXT:    ret <2 x i8> [[X]]
  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -20>, <2 x i8> %a)
; CHECK-LABEL: @test_scalar_uadd_combine(
; CHECK-NEXT:    [[X2:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[X2]]
; CHECK-LABEL: @test_vector_uadd_combine(
; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 30))
; CHECK-NEXT:    ret <2 x i8> [[X2]]
; CHECK-LABEL: @test_vector_uadd_combine_non_splat(
; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 30, i8 40>)
; CHECK-NEXT:    ret <2 x i8> [[X2]]
; CHECK-LABEL: @test_scalar_uadd_overflow(
; CHECK-NEXT:    ret i8 -1
; CHECK-LABEL: @test_vector_uadd_overflow(
; CHECK-NEXT:    ret <2 x i8> splat (i8 -1)
; CHECK-LABEL: @test_scalar_sadd_both_positive(
; CHECK-NEXT:    [[Z2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[Z2]]
; CHECK-LABEL: @test_vector_sadd_both_positive(
; CHECK-NEXT:    [[Z2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 30))
; CHECK-NEXT:    ret <2 x i8> [[Z2]]
; CHECK-LABEL: @test_scalar_sadd_both_negative(
; CHECK-NEXT:    [[U2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
; CHECK-NEXT:    ret i8 [[U2]]
  %u1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
  %u2 = call i8 @llvm.sadd.sat.i8(i8 %u1, i8 -20)
; CHECK-LABEL: @test_vector_sadd_both_negative(
; CHECK-NEXT:    [[U2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 -30))
; CHECK-NEXT:    ret <2 x i8> [[U2]]
  %u1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
  %u2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
; CHECK-LABEL: @test_scalar_sadd_different_sign(
; CHECK-NEXT:    [[V1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[V1]], i8 -20)
; CHECK-NEXT:    ret i8 [[V2]]
  %v2 = call i8 @llvm.sadd.sat.i8(i8 %v1, i8 -20)
; CHECK-LABEL: @test_scalar_sadd_overflow(
; CHECK-NEXT:    [[W1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 100)
; CHECK-NEXT:    [[W2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[W1]], i8 100)
; CHECK-NEXT:    ret i8 [[W2]]
; CHECK-LABEL: @test_scalar_uadd_neg_neg(
; CHECK-NEXT:    ret i8 -1
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 -10)
  ret i8 %r
; CHECK-LABEL: @test_vector_uadd_neg_neg(
; CHECK-NEXT:    ret <2 x i8> splat (i8 -1)
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_uadd_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[A_NNEG]], 10
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_nneg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_uadd_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], splat (i8 127)
; CHECK-NEXT:    [[R:%.*]] = add nuw <2 x i8> [[A_NNEG]], <i8 10, i8 20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_uadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NEG]], i8 10)
; CHECK-NEXT:    ret i8 [[R]]
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_uadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_uadd_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = and i8 [[A:%.*]], -127
; CHECK-NEXT:    [[R:%.*]] = add nuw nsw i8 [[A_MASKED]], 1
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_masked, i8 1)
  ret i8 %r
; CHECK-LABEL: @test_vector_uadd_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = and <2 x i8> [[A:%.*]], splat (i8 -127)
; CHECK-NEXT:    [[R:%.*]] = add nuw nsw <2 x i8> [[A_MASKED]], splat (i8 1)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 1, i8 1>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_uadd_always_overflows(
; CHECK-NEXT:    ret i8 -1
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_masked, i8 64)
  ret i8 %r
; CHECK-LABEL: @test_vector_uadd_always_overflows(
; CHECK-NEXT:    ret <2 x i8> splat (i8 -1)
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 64, i8 64>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_sadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NEG]], 10
; CHECK-NEXT:    ret i8 [[R]]
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.sadd.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_sadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NEG]], <i8 10, i8 20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_sadd_nneg_neg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NNEG]], -10
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.sadd.sat.i8(i8 %a_nneg, i8 -10)
  ret i8 %r
; CHECK-LABEL: @test_vector_sadd_nneg_neg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], splat (i8 127)
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NNEG]], <i8 -10, i8 -20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_sadd_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
; CHECK-NEXT:    ret i8 [[R]]
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.sadd.sat.i8(i8 %a_neg, i8 -10)
  ret i8 %r
; CHECK-LABEL: @test_vector_sadd_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_sadd_always_overflows_low(
; CHECK-NEXT:    ret i8 -128
  %cmp = icmp slt i8 %a, -120
  %min = select i1 %cmp, i8 %a, i8 -120
  %r = call i8 @llvm.sadd.sat.i8(i8 %min, i8 -10)
  ret i8 %r
; CHECK-LABEL: @test_scalar_sadd_always_overflows_high(
; CHECK-NEXT:    ret i8 127
  %r = call i8 @llvm.sadd.sat.i8(i8 %max, i8 10)
  ret i8 %r
; While this is a no-overflow condition, the nuw flag gets lost due to
; canonicalization, and the no-overflow fact can no longer be recovered.
; CHECK-LABEL: @test_scalar_uadd_sub_nuw_lost_no_ov(
; CHECK-NEXT:    [[B:%.*]] = add i8 [[A:%.*]], -10
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[B]], i8 9)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.uadd.sat.i8(i8 %b, i8 9)
  ret i8 %r
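
; With the nuw flag, %b (originally "sub nuw i8 %a, 10") is known to be at
; most 245, so %b + 9 cannot exceed 254 and the saturating add could become a
; plain add; once the sub is rewritten to "add i8 %a, -10" that bound is gone.
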
; CHECK-LABEL: @test_scalar_uadd_urem_no_ov(
; CHECK-NEXT:    [[B:%.*]] = urem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = add nuw nsw i8 [[B]], -100
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.uadd.sat.i8(i8 %b, i8 156)
  ret i8 %r
; CHECK-LABEL: @test_scalar_uadd_urem_may_ov(
; CHECK-NEXT:    [[B:%.*]] = urem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[B]], i8 -99)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.uadd.sat.i8(i8 %b, i8 157)
  ret i8 %r
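
; The arithmetic behind this pair: urem by 100 bounds %b to [0, 99], so
; %b + 156 is at most 255 and can never wrap (156 prints as -100 in i8),
; while %b + 157 can reach 256 and may still need to saturate.
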
; CHECK-LABEL: @test_scalar_uadd_udiv_known_bits(
; CHECK-NEXT:    [[AA:%.*]] = udiv i8 -66, [[A:%.*]]
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 63
; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.uadd.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
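
; Known-bits reasoning: the udiv result is at most 190 (-66 read as unsigned)
; and the mask keeps %bb below 64, so the sum is at most 253 and the
; saturating add can become a plain "add nuw".
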
; CHECK-LABEL: @test_scalar_sadd_srem_no_ov(
; CHECK-NEXT:    [[B:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[B]], 28
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.sadd.sat.i8(i8 %b, i8 28)
  ret i8 %r
; CHECK-LABEL: @test_scalar_sadd_srem_may_ov(
; CHECK-NEXT:    [[B:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[B]], i8 29)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.sadd.sat.i8(i8 %b, i8 29)
  ret i8 %r
; CHECK-LABEL: @test_scalar_sadd_srem_and_no_ov(
; CHECK-NEXT:    [[AA:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 15
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.sadd.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
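
; Signed variant of the same range reasoning: srem by 100 bounds the result
; to [-99, 99]. Adding 28 stays within [-71, 127] (no signed wrap), adding 29
; can reach 128 (may wrap), and adding a masked value in [0, 15] stays within
; [-99, 114].
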
; CHECK-LABEL: @test_scalar_usub_canonical(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_scalar_ssub_canonical(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_ssub_canonical(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 -10))
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_vector_ssub_canonical_min_non_splat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 10>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 -10>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_ssub_canonical_min(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -128)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -128)
  ret i8 %r
; CHECK-LABEL: @test_vector_ssub_canonical_min(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -128, i8 -10>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -128, i8 -10>)
  ret <2 x i8> %r
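
; ssub.sat with a constant RHS is canonicalized to sadd.sat of the negated
; constant. The exception is -128, whose negation is not representable in i8,
; so the scalar call and the vector with a -128 lane are left alone.
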
; CHECK-LABEL: @test_scalar_usub_combine(
; CHECK-NEXT:    [[X2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[X2]]
; CHECK-LABEL: @test_simplify_decrement(
; CHECK-NEXT:    [[I2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 1)
; CHECK-NEXT:    ret i8 [[I2]]
; CHECK-LABEL: @test_simplify_decrement_ne(
; CHECK-NEXT:    [[I:%.*]] = icmp ne i8 [[A:%.*]], 0
; CHECK-NEXT:    call void @use.i1(i1 [[I]])
; CHECK-NEXT:    [[I2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A]], i8 1)
; CHECK-NEXT:    ret i8 [[I2]]
  %i1 = add i8 %a, -1
; CHECK-LABEL: @test_simplify_decrement_vec(
; CHECK-NEXT:    [[I2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 1))
; CHECK-NEXT:    ret <2 x i8> [[I2]]
; CHECK-LABEL: @test_simplify_decrement_vec_poison(
; CHECK-NEXT:    [[I2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 1))
; CHECK-NEXT:    ret <2 x i8> [[I2]]
; CHECK-LABEL: @test_simplify_decrement_invalid_ne(
; CHECK-NEXT:    [[I_NOT:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    [[I2:%.*]] = sext i1 [[I_NOT]] to i8
; CHECK-NEXT:    ret i8 [[I2]]
; CHECK-LABEL: @test_invalid_simplify_sub2(
; CHECK-NEXT:    [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    [[I1:%.*]] = add i8 [[A]], -2
; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
; CHECK-NEXT:    ret i8 [[I2]]
; CHECK-LABEL: @test_invalid_simplify_eq2(
; CHECK-NEXT:    [[I:%.*]] = icmp eq i8 [[A:%.*]], 2
; CHECK-NEXT:    [[I1:%.*]] = add i8 [[A]], -1
; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
; CHECK-NEXT:    ret i8 [[I2]]
; CHECK-LABEL: @test_invalid_simplify_select_1(
; CHECK-NEXT:    [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    [[I1:%.*]] = add i8 [[A]], -1
; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I]], i8 1, i8 [[I1]]
; CHECK-NEXT:    ret i8 [[I2]]
; CHECK-LABEL: @test_invalid_simplify_other(
; CHECK-NEXT:    [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    [[I1:%.*]] = add i8 [[B:%.*]], -1
; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
; CHECK-NEXT:    ret i8 [[I2]]
; CHECK-LABEL: @test_vector_usub_combine(
; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 30))
; CHECK-NEXT:    ret <2 x i8> [[X2]]
; CHECK-LABEL: @test_vector_usub_combine_non_splat(
; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 30, i8 40>)
; CHECK-NEXT:    ret <2 x i8> [[X2]]
; CHECK-LABEL: @test_scalar_usub_overflow(
; CHECK-NEXT:    ret i8 0
; CHECK-LABEL: @test_vector_usub_overflow(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
; CHECK-LABEL: @test_scalar_ssub_both_positive(
; CHECK-NEXT:    [[Z2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
; CHECK-NEXT:    ret i8 [[Z2]]
; CHECK-LABEL: @test_vector_ssub_both_positive(
; CHECK-NEXT:    [[Z2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 -30))
; CHECK-NEXT:    ret <2 x i8> [[Z2]]
; CHECK-LABEL: @test_scalar_ssub_both_negative(
; CHECK-NEXT:    [[U2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[U2]]
  %u1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
  %u2 = call i8 @llvm.ssub.sat.i8(i8 %u1, i8 -20)
; CHECK-LABEL: @test_vector_ssub_both_negative(
; CHECK-NEXT:    [[U2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 30))
; CHECK-NEXT:    ret <2 x i8> [[U2]]
  %u1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
  %u2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
; CHECK-LABEL: @test_scalar_ssub_different_sign(
; CHECK-NEXT:    [[V1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[V1]], i8 20)
; CHECK-NEXT:    ret i8 [[V2]]
  %v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
; CHECK-LABEL: @test_scalar_sadd_ssub(
; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[V2]]
  %v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
; CHECK-LABEL: @test_vector_sadd_ssub(
; CHECK-NEXT:    [[V2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> splat (i8 -30))
; CHECK-NEXT:    ret <2 x i8> [[V2]]
  %v1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -10, i8 -10>, <2 x i8> %a)
; CHECK-LABEL: @test_scalar_ssub_overflow(
; CHECK-NEXT:    [[W1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -100)
; CHECK-NEXT:    [[W2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[W1]], i8 -100)
; CHECK-NEXT:    ret i8 [[W2]]
; CHECK-LABEL: @test_scalar_usub_nneg_neg(
; CHECK-NEXT:    ret i8 0
  %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 -10)
  ret i8 %r
; CHECK-LABEL: @test_vector_usub_nneg_neg(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_usub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = add i8 [[A_NEG]], -10
; CHECK-NEXT:    ret i8 [[R]]
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.usub.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_usub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[A_NEG]], <i8 -10, i8 -20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_usub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NNEG]], i8 10)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_usub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], splat (i8 127)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_usub_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = or i8 [[A:%.*]], 64
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_MASKED]], -10
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %a_masked, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_usub_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 64)
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_MASKED]], splat (i8 -10)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 10, i8 10>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_usub_always_overflows(
; CHECK-NEXT:    ret i8 0
  %r = call i8 @llvm.usub.sat.i8(i8 %a_masked, i8 100)
  ret i8 %r
; CHECK-LABEL: @test_vector_usub_always_overflows(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 100, i8 100>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_ssub_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NEG]], 10
; CHECK-NEXT:    ret i8 [[R]]
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 -10)
  ret i8 %r
; CHECK-LABEL: @test_vector_ssub_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NEG]], <i8 10, i8 20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_ssub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NNEG]], -10
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.ssub.sat.i8(i8 %a_nneg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_ssub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], splat (i8 127)
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NNEG]], <i8 -10, i8 -20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_ssub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
; CHECK-NEXT:    ret i8 [[R]]
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_vector_ssub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_ssub_always_overflows_low(
; CHECK-NEXT:    ret i8 -128
  %r = call i8 @llvm.ssub.sat.i8(i8 -10, i8 %max)
  ret i8 %r
; CHECK-LABEL: @test_scalar_ssub_always_overflows_high(
; CHECK-NEXT:    ret i8 127
  %cmp = icmp slt i8 %a, -120
  %min = select i1 %cmp, i8 %a, i8 -120
  %r = call i8 @llvm.ssub.sat.i8(i8 10, i8 %min)
  ret i8 %r
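
; Constant-fold rationale for the pair above: 10 - %min with %min <= -120 is
; at least 130, above the signed i8 maximum, so the result is always 127;
; symmetrically, -10 - %max with %max >= 120 is at most -130, so the low
; variant always returns -128.
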
; CHECK-LABEL: @test_scalar_usub_add_nuw_no_ov(
; CHECK-NEXT:    [[R:%.*]] = add i8 [[A:%.*]], 1
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 9)
  ret i8 %r
; CHECK-LABEL: @test_scalar_usub_add_nuw_nsw_no_ov(
; CHECK-NEXT:    [[R:%.*]] = add i8 [[A:%.*]], 1
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 9)
  ret i8 %r
; CHECK-LABEL: @test_scalar_usub_add_nuw_eq(
; CHECK-NEXT:    ret i8 [[A:%.*]]
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_scalar_usub_add_nuw_may_ov(
; CHECK-NEXT:    [[B:%.*]] = add nuw i8 [[A:%.*]], 10
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[B]], i8 11)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 11)
  ret i8 %r
; CHECK-LABEL: @test_scalar_usub_urem_must_ov(
; CHECK-NEXT:    ret i8 0
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 10)
  ret i8 %r
; CHECK-LABEL: @test_scalar_usub_urem_must_zero(
; CHECK-NEXT:    [[B:%.*]] = urem i8 [[A:%.*]], 10
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[B]], i8 9)
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 9)
  ret i8 %r
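
; urem by 10 bounds %b to [0, 9]. Subtracting 10 always underflows, so the
; first case folds to 0. Subtracting 9 also yields 0 for every input
; (saturating for %b < 9, exactly for %b == 9), but that is not a
; must-overflow condition, so the call is kept.
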
; CHECK-LABEL: @test_scalar_usub_add_nuw_known_bits(
; CHECK-NEXT:    [[AA:%.*]] = add nuw i8 [[A:%.*]], 10
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = sub nuw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.usub.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
; CHECK-LABEL: @test_scalar_usub_add_nuw_inferred(
; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[B]], 9
; CHECK-NEXT:    ret i8 [[R]]
  %r = add i8 %b, 9
  ret i8 %r
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov(
; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[A:%.*]], splat (i8 1)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 9, i8 9>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat1(
; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[A:%.*]], <i8 0, i8 1>
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 10, i8 9>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat1_poison(
; CHECK-NEXT:    [[R:%.*]] = add <3 x i8> [[A:%.*]], <i8 0, i8 1, i8 poison>
; CHECK-NEXT:    ret <3 x i8> [[R]]
  %r = call <3 x i8> @llvm.usub.sat.v3i8(<3 x i8> %b, <3 x i8> <i8 10, i8 9, i8 poison>)
  ret <3 x i8> %r
; Can be optimized if the add nuw RHS constant range handles non-splat vectors.
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat2(
; CHECK-NEXT:    [[B:%.*]] = add nuw <2 x i8> [[A:%.*]], <i8 10, i8 9>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[B]], <2 x i8> splat (i8 9))
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 9, i8 9>)
  ret <2 x i8> %r
; Can be optimized if constant range is tracked per-element.
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat3(
; CHECK-NEXT:    [[B:%.*]] = add nuw <2 x i8> [[A:%.*]], <i8 10, i8 9>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[B]], <2 x i8> <i8 10, i8 9>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 10, i8 9>)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_ssub_add_nsw_no_ov(
; CHECK-NEXT:    [[AA:%.*]] = add nsw i8 [[A:%.*]], 7
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = sub nsw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.ssub.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
; CHECK-LABEL: @test_scalar_ssub_add_nsw_may_ov(
; CHECK-NEXT:    [[AA:%.*]] = add nsw i8 [[A:%.*]], 6
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[AA]], i8 [[BB]])
; CHECK-NEXT:    ret i8 [[R]]
  %r = call i8 @llvm.ssub.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_splat(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], splat (i8 7)
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], splat (i8 7)
; CHECK-NEXT:    [[R:%.*]] = sub nsw <2 x i8> [[AA]], [[BB]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat1(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], splat (i8 7)
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 6>
; CHECK-NEXT:    [[R:%.*]] = sub nsw <2 x i8> [[AA]], [[BB]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat2(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 8>
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], splat (i8 7)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat3(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 6>
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 6>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
; CHECK-LABEL: @test_scalar_usub_add(
; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    ret i8 [[RES]]
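
; This fold uses the identity usub.sat(a, b) + b == umax(a, b): when a > b
; the saturating sub returns a - b and adding b back gives a; otherwise it
; returns 0 and the sum is just b.
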
; CHECK-LABEL: @test_scalar_usub_add_extra_use(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    store i8 [[SAT]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_usub_add_commuted(
; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_usub_add_commuted_wrong(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[B:%.*]], i8 [[A:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_usub_add_const(
; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 42)
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_usub_sub(
; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umin.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    ret i8 [[RES]]
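
; The dual identity: a - usub.sat(a, b) == umin(a, b), since the saturating
; sub leaves b behind when a > b and leaves a behind otherwise.
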
; CHECK-LABEL: @test_scalar_usub_sub_extra_use(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    store i8 [[SAT]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[A]], [[SAT]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_vector_usub_sub(
; CHECK-NEXT:    [[RES:%.*]] = call <2 x i8> @llvm.umin.v2i8(<2 x i8> [[A:%.*]], <2 x i8> [[B:%.*]])
; CHECK-NEXT:    ret <2 x i8> [[RES]]
; CHECK-LABEL: @test_scalar_usub_sub_wrong(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[B]], [[SAT]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_usub_sub_wrong2(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_uadd_sub(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_uadd_sub_extra_use(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    store i8 [[SAT]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_uadd_sub_commuted(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[B:%.*]], i8 [[A:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_uadd_sub_commuted_wrong(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[B]], [[SAT]]
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @test_scalar_uadd_sub_const(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 42)
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], -42
; CHECK-NEXT:    ret i8 [[RES]]
; CHECK-LABEL: @scalar_uadd_eq_zero(
; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK-LABEL: @scalar_uadd_ne_zero(
; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK-LABEL: @scalar_usub_eq_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ule i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK-LABEL: @scalar_usub_ne_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
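
; Comparisons against zero simplify directly: uadd.sat(a, b) == 0 only when
; both inputs are zero (hence the single "or"), and usub.sat(a, b) == 0
; exactly when a <= b.
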
; CHECK-LABEL: @uadd_sat(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
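
; The open-coded pattern recognized here: %notx is ~x, i.e. UINT_MAX - x, so
; comparing %y against it detects exactly the cases where x + y would wrap,
; and selecting -1 for those cases is uadd.sat(x, y).
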
; CHECK-LABEL: @uadd_sat_flipped(
; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 9)
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ugt i32 %x, -11
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped2(
; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 9)
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ugt i32 %x, -10
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], -8
; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 -1, i32 [[ADD]]
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ugt i32 %x, -8
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped3_neg_no_nuw(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], -8
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 -1, i32 [[ADD]]
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ugt i32 %x, -8
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_negative_one(
; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp eq i32 %x, -1
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped4_vector(
; CHECK-NEXT:    [[COND:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> splat (i8 9))
; CHECK-NEXT:    ret <2 x i8> [[COND]]
  %cmp = icmp ult <2 x i8> %x, <i8 -10, i8 -10>
  %cond = select <2 x i1> %cmp, <2 x i8> %add, <2 x i8> <i8 -1, i8 -1>
; CHECK-LABEL: @uadd_sat_flipped4_poison_vector(
; CHECK-NEXT:    [[COND:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> splat (i8 9))
; CHECK-NEXT:    ret <2 x i8> [[COND]]
  %cmp = icmp ult <2 x i8> %x, <i8 -10, i8 poison>
  %cond = select <2 x i1> %cmp, <2 x i8> %add, <2 x i8> <i8 -1, i8 -1>
; CHECK-LABEL: @uadd_sat_flipped4_poison_vector_compare(
; CHECK-NEXT:    [[COND:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> splat (i8 9))
; CHECK-NEXT:    ret <2 x i8> [[COND]]
  %cmp = icmp ult <2 x i8> %x, <i8 -10, i8 poison>
  %cond = select <2 x i1> %cmp, <2 x i8> %add, <2 x i8> <i8 -1, i8 -1>
; CHECK-LABEL: @uadd_sat_flipped4_poison_vector_compare2(
; CHECK-NEXT:    ret <2 x i8> splat (i8 -1)
  %cmp = icmp ult <2 x i8> %x, <i8 -10, i8 poison>
  %cond = select <2 x i1> %cmp, <2 x i8> %add, <2 x i8> <i8 -1, i8 -1>
; CHECK-LABEL: @uadd_sat_flipped_too_big(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], -8
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[ADD]], i32 -1
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ult i32 %x, -8
  %cond = select i1 %cmp, i32 %add, i32 -1
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], -13
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 -1, i32 [[ADD]]
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp uge i32 %x, -12
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], -12
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 -1, i32 [[ADD]]
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ugt i32 %x, -12
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], -12
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 -1, i32 [[ADD]]
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ugt i32 %x, -12
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds4(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], -9
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 -1, i32 [[ADD]]
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp uge i32 %x, -8
  %cond = select i1 %cmp, i32 -1, i32 %add
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds5(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], -8
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[ADD]], i32 -1
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ult i32 %x, -8
  %cond = select i1 %cmp, i32 %add, i32 -1
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds6(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], -11
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[ADD]], i32 -1
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ule i32 %x, -12
  %cond = select i1 %cmp, i32 %add, i32 -1
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds7(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], -11
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[ADD]], i32 -1
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ule i32 %x, -12
  %cond = select i1 %cmp, i32 %add, i32 -1
; CHECK-LABEL: @uadd_sat_flipped_wrong_bounds8(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], -12
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X]], 9
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32 [[ADD]], i32 -1
; CHECK-NEXT:    ret i32 [[COND]]
  %cmp = icmp ult i32 %x, -12
  %cond = select i1 %cmp, i32 %add, i32 -1
; CHECK-LABEL: @uadd_sat_nonstrict(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
define i32 @uadd_sat_commute_add(i32 %xp, i32 %y) {
; CHECK-LABEL: @uadd_sat_commute_add(
; CHECK-NEXT:    [[X:%.*]] = urem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %x = urem i32 42, %xp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_ugt(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_uge(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
define <2 x i32> @uadd_sat_ugt_commute_add(<2 x i32> %xp, <2 x i32> %yp) {
; CHECK-LABEL: @uadd_sat_ugt_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = sdiv <2 x i32> [[YP:%.*]], <i32 2442, i32 4242>
; CHECK-NEXT:    [[X:%.*]] = srem <2 x i32> <i32 42, i32 43>, [[XP:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X]], <2 x i32> [[Y]])
; CHECK-NEXT:    ret <2 x i32> [[R]]
  %y = sdiv <2 x i32> %yp, <i32 2442, i32 4242> ; thwart complexity-based-canonicalization
  %x = srem <2 x i32> <i32 42, i32 43>, %xp ; thwart complexity-based-canonicalization
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %r = select <2 x i1> %c, <2 x i32> <i32 -1, i32 -1>, <2 x i32> %a
  ret <2 x i32> %r
; CHECK-LABEL: @uadd_sat_commute_select(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_commute_select_nonstrict(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
define i32 @uadd_sat_commute_select_commute_add(i32 %xp, i32 %yp) {
; CHECK-LABEL: @uadd_sat_commute_select_commute_add(
; CHECK-NEXT:    [[X:%.*]] = urem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %x = urem i32 42, %xp ; thwart complexity-based-canonicalization
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_commute_select_ugt(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]])
; CHECK-NEXT:    ret <2 x i32> [[R]]
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %r = select <2 x i1> %c, <2 x i32> %a, <2 x i32> <i32 -1, i32 -1>
  ret <2 x i32> %r
define i32 @uadd_sat_commute_select_ugt_commute_add(i32 %xp, i32 %y) {
; CHECK-LABEL: @uadd_sat_commute_select_ugt_commute_add(
; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %x = srem i32 42, %xp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; Negative test - make sure we have a -1 in the select.
; CHECK-LABEL: @not_uadd_sat(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], -2
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[X]], 1
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Y:%.*]]
; CHECK-NEXT:    ret i32 [[R]]
  %a = add i32 %x, -2
  %r = select i1 %c, i32 %a, i32 %y
  ret i32 %r
; Negative test - make sure the predicate is 'ult'.
; CHECK-LABEL: @not_uadd_sat2(
; CHECK-NEXT:    [[X:%.*]] = call i32 @llvm.umax.i32(i32 [[X1:%.*]], i32 1)
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X]], -2
; CHECK-NEXT:    ret i32 [[A]]
  %a = add i32 %x, -2
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_nonstrict(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
define i32 @uadd_sat_not_commute_add(i32 %xp, i32 %yp) {
; CHECK-LABEL: @uadd_sat_not_commute_add(
; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = urem i32 42, [[YP:%.*]]
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %x = srem i32 42, %xp ; thwart complexity-based-canonicalization
  %y = urem i32 42, %yp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_ugt(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_uge(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_ugt_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = sdiv <2 x i32> [[YP:%.*]], <i32 2442, i32 4242>
; CHECK-NEXT:    [[NOTX:%.*]] = xor <2 x i32> [[X:%.*]], splat (i32 -1)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
; CHECK-NEXT:    ret <2 x i32> [[R]]
  %y = sdiv <2 x i32> %yp, <i32 2442, i32 4242> ; thwart complexity-based-canonicalization
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %r = select <2 x i1> %c, <2 x i32> <i32 -1, i32 -1>, <2 x i32> %a
  ret <2 x i32> %r
; CHECK-LABEL: @uadd_sat_not_ugt_commute_add_partial_poison(
; CHECK-NEXT:    [[NOTX:%.*]] = xor <2 x i32> [[X:%.*]], <i32 -1, i32 poison>
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[YP:%.*]], [[NOTX]]
; CHECK-NEXT:    [[C:%.*]] = icmp ugt <2 x i32> [[YP]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[C]], <2 x i32> splat (i32 -1), <2 x i32> [[A]]
; CHECK-NEXT:    ret <2 x i32> [[R]]
  %notx = xor <2 x i32> %x, <i32 -1, i32 poison>
  %r = select <2 x i1> %c, <2 x i32> <i32 -1, i32 -1>, <2 x i32> %a
  ret <2 x i32> %r
; CHECK-LABEL: @uadd_sat_not_commute_select(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_commute_select_nonstrict(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_commute_select_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 42, [[YP:%.*]]
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = sdiv i32 42, %yp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
define <2 x i32> @uadd_sat_not_commute_select_ugt(<2 x i32> %xp, <2 x i32> %yp) {
; CHECK-LABEL: @uadd_sat_not_commute_select_ugt(
; CHECK-NEXT:    [[X:%.*]] = urem <2 x i32> <i32 42, i32 -42>, [[XP:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = srem <2 x i32> <i32 12, i32 412>, [[YP:%.*]]
; CHECK-NEXT:    [[NOTX:%.*]] = xor <2 x i32> [[X]], splat (i32 -1)
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
; CHECK-NEXT:    ret <2 x i32> [[R]]
  %x = urem <2 x i32> <i32 42, i32 -42>, %xp ; thwart complexity-based-canonicalization
  %y = srem <2 x i32> <i32 12, i32 412>, %yp ; thwart complexity-based-canonicalization
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %r = select <2 x i1> %c, <2 x i32> %a, <2 x i32> <i32 -1, i32 -1>
  ret <2 x i32> %r
; CHECK-LABEL: @uadd_sat_not_commute_select_ugt_commute_add(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_not_commute_select_uge_commute_add(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[R]]
  %notx = xor i32 %x, -1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_constant(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret i32 [[R]]
  %c = icmp ugt i32 %x, -43
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_constant_commute(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret i32 [[R]]
  %c = icmp ult i32 %x, -43
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_canon(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_canon_y(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_canon_nuw(
; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_canon_y_nuw(
; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_constant_vec(
; CHECK-NEXT:    [[R:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[X:%.*]], <4 x i32> splat (i32 42))
; CHECK-NEXT:    ret <4 x i32> [[R]]
  %c = icmp ugt <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
; CHECK-LABEL: @uadd_sat_constant_vec_commute(
; CHECK-NEXT:    [[R:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[X:%.*]], <4 x i32> splat (i32 42))
; CHECK-NEXT:    ret <4 x i32> [[R]]
  %c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %r
; CHECK-LABEL: @uadd_sat_constant_vec_commute_undefs(
; CHECK-NEXT:    [[R:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[X:%.*]], <4 x i32> splat (i32 42))
; CHECK-NEXT:    ret <4 x i32> [[R]]
  %c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 poison, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> <i32 -1, i32 poison, i32 -1, i32 -1>
  ret <4 x i32> %r
; CHECK-LABEL: @unsigned_sat_variable_using_min_add(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %r = add i32 %s, %y
  ret i32 %r
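
; The clamp-then-add idiom: %s = umin(%x, ~%y) caps %x at UINT_MAX - %y, so
; %s + %y == min(%x + %y, UINT_MAX), which is precisely uadd.sat(%x, %y).
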
; CHECK-LABEL: @unsigned_sat_variable_using_min_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %r = add i32 %y, %s
  ret i32 %r
; CHECK-LABEL: @unsigned_sat_variable_using_min_commute_select(
; CHECK-NEXT:    [[Y:%.*]] = call <2 x i8> @get_v2i8()
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[Y]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %y = call <2 x i8> @get_v2i8() ; thwart complexity-based canonicalization
  %noty = xor <2 x i8> %y, <i8 -1, i8 -1>
  %r = add <2 x i8> %s, %y
  ret <2 x i8> %r
; CHECK-LABEL: @unsigned_sat_variable_using_min_commute_add_select(
; CHECK-NEXT:    [[Y:%.*]] = call <2 x i8> @get_v2i8()
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[Y]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
  %y = call <2 x i8> @get_v2i8() ; thwart complexity-based canonicalization
  %noty = xor <2 x i8> %y, <i8 -1, i8 -1>
  %r = add <2 x i8> %y, %s
  ret <2 x i8> %r
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_min(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[NOTY:%.*]] = xor i32 [[Y]], -1
; CHECK-NEXT:    [[S:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[NOTY]])
; CHECK-NEXT:    [[R:%.*]] = add i32 [[Y]], [[S]]
; CHECK-NEXT:    ret i32 [[R]]
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %r = add i32 %y, %s
  ret i32 %r
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_value(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[NOTY:%.*]] = xor i32 [[Y]], -1
; CHECK-NEXT:    [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 [[NOTY]])
; CHECK-NEXT:    [[R:%.*]] = add i32 [[Z:%.*]], [[S]]
; CHECK-NEXT:    ret i32 [[R]]
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %r = add i32 %z, %s
  ret i32 %r
; CHECK-LABEL: @unsigned_sat_constant_using_min(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 -43)
; CHECK-NEXT:    ret i32 [[R]]
  %r = add i32 %s, -43
  ret i32 %r
; CHECK-LABEL: @unsigned_sat_constant_using_min_splat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X:%.*]], <2 x i32> splat (i32 -15))
; CHECK-NEXT:    ret <2 x i32> [[R]]
  %r = add <2 x i32> %s, <i32 -15, i32 -15>
  ret <2 x i32> %r
; CHECK-LABEL: @unsigned_sat_constant_using_min_wrong_constant(
; CHECK-NEXT:    [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    [[R:%.*]] = add nsw i32 [[S]], -42
; CHECK-NEXT:    ret i32 [[R]]
  %r = add i32 %s, -42
  ret i32 %r
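
; In the constant form the add constant must be the bitwise-not of the umin
; bound: umin(x, 42) plus -43 matches (since -43 == ~42), while umin(x, 42)
; plus -42 does not, so the wrong_constant case keeps the umin.
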
; CHECK-LABEL: @uadd_sat_via_add(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_nonstrict(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[C_NOT:%.*]] = icmp ugt i32 [[A]], [[Y]]
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C_NOT]], i32 [[A]], i32 -1
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_swapped_select(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_swapped_select_strict(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[A]], [[Y]]
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 -1
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_nonstrict(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[C_NOT:%.*]] = icmp ult i32 [[Y]], [[A]]
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C_NOT]], i32 [[A]], i32 -1
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_nonstric(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_select_nonstrict(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[Y]], [[A]]
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 -1
; CHECK-NEXT:    ret i32 [[R]]
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
; CHECK-LABEL: @fold_add_umax_to_usub(
; CHECK-NEXT:    [[SEL:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    ret i8 [[SEL]]
  %sel = add i8 %umax, -10
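
; umax(%a, 10) - 10 is usub.sat(%a, 10): both give %a - 10 when %a > 10 and 0
; otherwise. The two negative tests that follow show the add constant must
; exactly negate the umax bound, and that an extra use of the umax blocks the
; fold.
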
; CHECK-LABEL: @fold_add_umax_to_usub_incorrect_rhs(
; CHECK-NEXT:    [[UMAX:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    [[SEL:%.*]] = add i8 [[UMAX]], -11
; CHECK-NEXT:    ret i8 [[SEL]]
  %sel = add i8 %umax, -11
; CHECK-LABEL: @fold_add_umax_to_usub_multiuse(
; CHECK-NEXT:    [[UMAX:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    call void @usei8(i8 [[UMAX]])
; CHECK-NEXT:    [[SEL:%.*]] = add i8 [[UMAX]], -10
; CHECK-NEXT:    ret i8 [[SEL]]
  %sel = add i8 %umax, -10