Lines Matching refs:overflow

4 declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
5 declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
6 declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
7 declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8) nounwind readnone
8 declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
9 declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
10 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
11 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
12 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
13 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
14 declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
15 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
22 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
37 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
54 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
66 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
78 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
90 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 -4)
100 %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
116 %x = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %aa, i32 %bb)
130 %x = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %aa, i32 %bb)
144 %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %aa, i32 %bb)
158 %x = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %aa, i32 %bb)
172 %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
186 %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
194 ; CHECK-NEXT: [[X:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[AA]], i32 [[BB]])
199 %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
213 %x = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %aa, i32 %bb)
222 %x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 0, i8 %A)
234 %x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 %A)
248 %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %shr, i32 3)
264 %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %shr, i32 4)
281 %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %or_x, i32 %or_y)
290 %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %div, i32 1)
302 %t = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %div, i32 1)
312 %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
320 ; CHECK-NEXT: [[T:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[REM]], i32 [[REM]])
325 ; This may overflow because each mul operand may need up to 16 bits, so the product may not fit in a signed i32
327 %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
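
To make the comment above concrete, here is a minimal sketch of the pattern (the function name @smul_srem_may_ov and the 65536 divisor are illustrative assumptions; the matched lines show only the calls):

define i1 @smul_srem_may_ov(i32 %x, i32 %y) {
  ; each remainder lies in (-65536, 65536), i.e. up to 16 bits of magnitude
  %rx = srem i32 %x, 65536
  %ry = srem i32 %y, 65536
  ; 65535 * 65535 = 4294836225 > 2^31 - 1, so the product can overflow i32
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rx, i32 %ry)
  %ov = extractvalue { i32, i1 } %t, 1
  ret i1 %ov
}
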
338 %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
351 %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 0, i32 %aa)
360 %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 0)
366 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
372 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
382 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
388 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
399 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X]], i32 [[Y:%.*]])
406 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
417 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y]])
424 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
434 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
440 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 42)
450 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
456 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 1)
466 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 -1)
472 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 -1)
482 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
485 %a = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 42, i32 %x)
491 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
494 %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 42, i32 %x)
500 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 42, i32 [[X:%.*]])
503 %a = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 42, i32 %x)
509 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 42, i32 [[X:%.*]])
512 %a = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 42, i32 %x)
518 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[X:%.*]], i32 42)
521 %a = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 42, i32 %x)
527 ; CHECK-NEXT: [[A:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 42)
530 %a = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 42, i32 %x)
534 ; Always overflow tests (a worked sketch follows this scalar group)
543 %a = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %y, i8 64)
555 %a = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 63, i8 %y)
566 %a = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %y, i8 2)
579 %a = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %y, i8 28)
592 %a = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 -100, i8 %y)
599 ; CHECK-NEXT: [[A:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[Y]], i8 2)
604 %a = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %y, i8 2)
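
A minimal sketch of how one of these always-overflow folds arises (the function name @uadd_always_ov is hypothetical, and constraining %y with an `or` is an assumption, since the matched lines show only the calls):

define i1 @uadd_always_ov(i8 %x) {
  ; %y has its top two bits set, so %y >= 192 and %y + 64 >= 256 always wraps
  %y = or i8 %x, 192
  %a = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %y, i8 64)
  %ov = extractvalue { i8, i1 } %a, 1
  ret i1 %ov    ; expected to fold to 'ret i1 true'
}
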
608 declare { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
609 declare { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
610 declare { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
611 declare { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
612 declare { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
613 declare { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
615 ; Always overflow (the constant arithmetic is worked just below)
621 %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
629 %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
637 %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
645 %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 0, i8 0, i8 0, i8 0>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
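
In each constant case above the overflow bit is decidable by plain arithmetic: for the sadd line, 127 + 1 = 128 wraps to -128 in i8, so the call can fold to the constant { <4 x i8> splat (i8 -128), <4 x i1> splat (i1 true) }; the uadd (255 + 1), ssub (-128 - 1), and usub (0 - 1) lines wrap analogously.
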
649 ; NOTE: LLVM doesn't (yet) detect that the multiplication always results in an overflow
652 ; CHECK-NEXT: [[X:%.*]] = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> splat (i8 127), <4 x i8> splat (i8 3))
655 %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
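
The arithmetic behind the NOTE: 127 * 3 = 381, which exceeds the signed i8 maximum of 127 and wraps to 381 - 256 = 125, so every lane overflows; the umul line below wraps the same way (255 * 3 = 765 > 255).
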
663 %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
667 ; Never overflow (the lane arithmetic is checked after this group)
673 %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 -10, i8 -20, i8 30, i8 40>, <4 x i8> <i8 -40, i8 10, i8 -30, i8 20>)
681 %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 0, i8 32, i8 64, i8 16>, <4 x i8> <i8 32, i8 32, i8 32, i8 32>)
689 %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -10, i8 -10, i8 -10, i8 -10>, <4 x i8> <i8 -10, i8 -20, i8 -30, i8 -40>)
697 %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 128, i8 0, i8 255, i8 1>)
705 %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 -6, i8 -6, i8 -6, i8 -6>, <4 x i8> <i8 9, i8 3, i8 10, i8 15>)
713 %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 15, i8 15, i8 15, i8 15>, <4 x i8> <i8 15, i8 8, i8 4, i8 2>)
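
These lanes can be checked by hand: in the sadd call of this group, -10 + -40 = -50, -20 + 10 = -10, 30 + -30 = 0, and 40 + 20 = 60 all lie inside [-128, 127], so the overflow lanes fold to false; the umul call directly above likewise stays in range (its largest lane is 15 * 15 = 225 <= 255).
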
723 %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
731 %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
739 %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
747 %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
755 %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
763 %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
774 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 -1)
788 %m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>)
802 %m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 poison, i8 -1>)
811 ; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[X:%.*]], i8 -2)
817 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 -2)
831 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 -1)
845 %m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>)
859 %m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 -1, i8 -1, i8 poison>)
872 %m = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -3, i8 -3, i8 poison, i8 -3>)
885 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 -1)
899 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 2)
913 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 8)
927 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 64)
939 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 256)
953 %m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 4, i8 4, i8 poison>)
964 ; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 3)
970 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 3)
985 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 4)
1000 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 16)
1015 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 32)
1029 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 128)
1044 %m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 2, i8 2, i8 poison>)
1055 ; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[X:%.*]], i8 7)
1061 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 7)