/llvm-project/llvm/test/Analysis/CostModel/AArch64/
  arith-overflow.ll
    7   declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64)
    8   declare {<2 x i64>, <2 x i1>} @llvm.sadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
    9   declare {<4 x i64>, <4 x i1>} @llvm.sadd.with.overflow.v4i64(<4 x i64>, <4 x i64>)
    10  declare {<8 x i64>, <8 x i1>} @llvm.sadd.with.overflow.v8i64(<8 x i64>, <8 x i64>)
    12  declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
    13  declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>)
    14  declare {<8 x i32>, <8 x i1>} @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>)
    15  declare {<16 x i32>, <16 x i1>} @llvm.sadd.with.overflow.v16i32(<16 x i32>, <16 x i32>)
    17  declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16)
    18  declare {<8 x i16>, <8 x i1>} @llvm.sadd.with.overflow.v8i16(<8 x i16>, <8 x i16>)
    [all …]
/llvm-project/llvm/test/Transforms/InstCombine/
  umul-sign-check.ll
    4   ; Check that we simplify llvm.umul.with.overflow, if the overflow check is
    5   ; weakened by or (icmp ne %res, 0) %overflow. This is generated by code using
    12  declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0
    24  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    25  %overflow = extractvalue { i64, i1 } %res, 1
    28  %overflow.1 = or i1 %overflow, %cmp
    30  ret i1 %overflow.1
    43  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    44  %overflow = extractvalue { i64, i1 } %res, 1
    47  %overflow.1 = select i1 %overflow, i1 true, i1 %cmp
    [all …]
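For context, a minimal C sketch of source that lowers to this IR shape (a hypothetical example, not quoted from the test): __builtin_mul_overflow becomes @llvm.umul.with.overflow.i64, and ||-combining its flag with a second i1 check typically ends up, after optimization, as the `or i1 %overflow, %cmp` or `select i1 %overflow, i1 true, i1 %cmp` forms seen in the hits above.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical caller (illustration only): the multiply uses the overflow
     * builtin and the overflow flag is ||-combined with a second check on the
     * product, which is the shape the InstCombine test above exercises. */
    bool checked_mul(uint64_t a, uint64_t b, uint64_t *out) {
        uint64_t res;
        bool overflow = __builtin_mul_overflow(a, b, &res);
        if (overflow || res == 0)   /* combined overflow/result check */
            return false;
        *out = res;
        return true;
    }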
  saddo.ll
    4   declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64)
    5   declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8)
    9   ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A:%.*]], i6…
    13  %res = tail call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
    14  %overflow = extractvalue { i64, i1 } %res, 1
    15  ret i1 %overflow
    22  %res = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 0)
    23  %overflow = extractvalue { i8, i1 } %res, 1
    24  ret i1 %overflow
    32  %res = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 1)
    [all …]
  uaddo2.ll
    4   declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)
    5   declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
    9   ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A:%.*]], i6…
    13  %res = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
    14  %overflow = extractvalue { i64, i1 } %res, 1
    15  ret i1 %overflow
    22  %res = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 0)
    23  %overflow = extractvalue { i8, i1 } %res, 1
    24  ret i1 %overflow
    32  %res = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 1)
    [all …]
  umulo.ll
    4   declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)
    5   declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8)
    6   declare { i1, i1 } @llvm.umul.with.overflow.i1(i1, i1)
    7   declare { <2 x i1>, <2 x i1> } @llvm.umul.with.overflow.v2i1(<2 x i1>, <2 x i1>)
    11  ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i6…
    15  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    16  %overflow = extractvalue { i64, i1 } %res, 1
    17  ret i1 %overflow
    24  %res = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 0)
    25  %overflow = extractvalue { i8, i1 } %res, 1
    [all …]
  smulo.ll
    4   declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
    5   declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8)
    6   declare { i1, i1 } @llvm.smul.with.overflow.i1(i1, i1)
    7   declare { <2 x i1>, <2 x i1> } @llvm.smul.with.overflow.v2i1(<2 x i1>, <2 x i1>)
    11  ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A:%.*]], i6…
    15  %res = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
    16  %overflow = extractvalue { i64, i1 } %res, 1
    17  ret i1 %overflow
    24  %res = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 0)
    25  %overflow = extractvalue { i8, i1 } %res, 1
    [all …]
  ssubo.ll
    4   declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64)
    5   declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8)
    11  ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 [[A:%.*]], i6…
    15  %res = tail call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
    16  %overflow = extractvalue { i64, i1 } %res, 1
    17  ret i1 %overflow
    24  %res = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %a, i8 0)
    25  %overflow = extractvalue { i8, i1 } %res, 1
    26  ret i1 %overflow
    34  %res = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %a, i8 1)
    [all …]
  usubo.ll
    4   declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64)
    5   declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8)
    14  %res = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    15  %overflow = extractvalue { i64, i1 } %res, 1
    16  ret i1 %overflow
    23  %res = tail call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %a, i8 0)
    24  %overflow = extractvalue { i8, i1 } %res, 1
    25  ret i1 %overflow
    33  %res = tail call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %a, i8 1)
    34  %overflow = extractvalue { i8, i1 } %res, 1
    [all …]
  ssub-with-overflow.ll
    4   declare { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32>, <2 x i32>)
    6   declare { <2 x i8>, <2 x i1> } @llvm.ssub.with.overflow.v2i8(<2 x i8>, <2 x i8>)
    8   declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
    10  declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8)
    14  ; CHECK-NEXT: [[B:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 -20)
    18  %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 13)
    29  %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 -7)
    35  ; CHECK-NEXT: [[B:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[X:%.*]], i8 -128)
    39  %b = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %a, i8 28)
    46  ; CHECK-NEXT: [[B:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow…
    [all …]
  sadd-with-overflow.ll
    4   declare { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)
    6   declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
    8   declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8)
    12  ; CHECK-NEXT: [[B:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
    16  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 13)
    27  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 -7)
    33  ; CHECK-NEXT: [[B:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[X:%.*]], i8 127)
    37  %b = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 27)
    44  ; CHECK-NEXT: [[B:%.*]] = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A]], i8 28)
    48  %b = tail call { i8, i1 } @llvm.sadd.with.overflow…
    [all …]
  uadd-with-overflow.ll
    4   declare { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)
    6   declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)
    8   declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
    10  declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
    14  ; CHECK-NEXT: [[B:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
    18  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
    24  ; CHECK-NEXT: [[B:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 -1)
    28  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
    38  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 56)
    45  ; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow…
    [all …]
  overflow-mul.ll
    20  ; CHECK-NEXT: [[UMUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
    29  %overflow = icmp ugt i64 %mul64, 4294967295
    30  %retval = zext i1 %overflow to i32
    38  ; CHECK-NEXT: [[UMUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
    47  %overflow = icmp uge i64 %mul64, 4294967296
    48  %retval = zext i1 %overflow to i32
    57  ; CHECK-NEXT: [[UMUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
    67  %overflow = icmp ugt i64 %mul64, 4294967295
    69  %retval = select i1 %overflow, i32 %mul32, i32 111
    89  %overflow…
    [all …]
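The hits above show the idiom this test matches: a 32-bit multiply carried out in 64 bits and compared against 4294967295 (ugt) or 4294967296 (uge), which InstCombine rewrites into a direct @llvm.umul.with.overflow.i32 call. A minimal C sketch of that source-level pattern (a hypothetical example, not quoted from the test):

    #include <stdint.h>

    /* Hypothetical example: widen-multiply-and-compare idiom. Do the 32-bit
     * multiply in 64 bits and report overflow when the product does not fit
     * in 32 bits; the optimizer can fold this into umul.with.overflow.i32. */
    uint32_t mul_overflows(uint32_t x, uint32_t y) {
        uint64_t wide = (uint64_t)x * (uint64_t)y;
        return wide > 4294967295u;   /* equivalently, wide >= 1ULL << 32 */
    }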
  with_overflow.ll
    4   declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
    5   declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
    6   declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
    7   declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8) nounwind readnone
    8   declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
    9   declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
    10  declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
    11  declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
    12  declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
    13  declare { i32, i1 } @llvm.usub.with.overflow…
    [all …]
/llvm-project/llvm/test/Analysis/CostModel/ARM/ |
  arith-overflow.ll
    11  declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64)
    12  declare {<2 x i64>, <2 x i1>} @llvm.sadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
    13  declare {<4 x i64>, <4 x i1>} @llvm.sadd.with.overflow.v4i64(<4 x i64>, <4 x i64>)
    14  declare {<8 x i64>, <8 x i1>} @llvm.sadd.with.overflow.v8i64(<8 x i64>, <8 x i64>)
    16  declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
    17  declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>)
    18  declare {<8 x i32>, <8 x i1>} @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>)
    19  declare {<16 x i32>, <16 x i1>} @llvm.sadd.with.overflow.v16i32(<16 x i32>, <16 x i32>)
    21  declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16)
    22  declare {<8 x i16>, <8 x i1>} @llvm.sadd.with.overflow…
    [all …]
/llvm-project/llvm/test/Analysis/CostModel/X86/ |
  arith-overflow.ll
    15  ; sadd.with.overflow
    18  declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64)
    19  declare {<2 x i64>, <2 x i1>} @llvm.sadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
    20  declare {<4 x i64>, <4 x i1>} @llvm.sadd.with.overflow.v4i64(<4 x i64>, <4 x i64>)
    21  declare {<8 x i64>, <8 x i1>} @llvm.sadd.with.overflow.v8i64(<8 x i64>, <8 x i64>)
    23  declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
    24  declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>)
    25  declare {<8 x i32>, <8 x i1>} @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>)
    26  declare {<16 x i32>, <16 x i1>} @llvm.sadd.with.overflow.v16i32(<16 x i32>, <16 x i32>)
    28  declare {i16, i1} @llvm.sadd.with.overflow…
    [all …]
/llvm-project/llvm/test/CodeGen/PowerPC/ |
  saddo-ssubo.ll
    4   declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
    5   declare { i16, i1 } @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
    6   declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
    7   declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
    8   declare { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
    9   declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8) nounwind readnone
    10  declare { i16, i1 } @llvm.ssub.with.overflow.i16(i16, i16) nounwind readnone
    11  declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
    12  declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
    13  declare { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow…
    [all …]
/llvm-project/llvm/test/Transforms/InstSimplify/ConstProp/ |
  overflow-ops.ll
    4   declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
    5   declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
    6   declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8)
    8   declare {i8, i1} @llvm.sadd.with.overflow.i8(i8, i8)
    9   declare {i8, i1} @llvm.ssub.with.overflow.i8(i8, i8)
    10  declare {i8, i1} @llvm.smul.with.overflow.i8(i8, i8)
    20  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 42, i8 100)
    28  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 120)
    36  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 undef)
    48  %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 2)
    [all …]
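For the constant calls above, the expected folds follow from plain 8-bit unsigned arithmetic (a reading of what this ConstProp test checks, not quoted from its truncated CHECK lines):

    42 + 100 = 142 ≤ 255    ->  fits in an i8, overflow bit false
    142 + 120 = 262 > 255   ->  wraps to 262 - 256 = 6, overflow bit true
    4 - 2 = 2               ->  no borrow, overflow bit false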
/llvm-project/llvm/test/CodeGen/Generic/ |
  overflow.ll
    2   ; Verify codegen's don't crash on overflow intrinsics.
    8   %sadd = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
    15  declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
    19  %sadd = tail call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
    26  declare { i16, i1 } @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
    30  %sadd = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
    37  declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
    44  %uadd = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
    51  declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
    55  %uadd = tail call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
    [all …]
/llvm-project/llvm/test/CodeGen/AArch64/ |
  overflow.ll
    13   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
    28   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 9, i32 11)
    43   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 0)
    58   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 0)
    73   %t = call {i64, i1} @llvm.sadd.with.overflow.64(i64 %lhs, i64 %rhs)
    87   %t = call {i64, i1} @llvm.uadd.with.overflow.64(i64 %lhs, i64 %rhs)
    100  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 0, i32 %v5)
    122  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %lhs, i32 -100)
    146  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %lhs, i32 5)
    161  %t = call {i32, i1} @llvm.uadd.with.overflow…
    [all …]
/llvm-project/llvm/test/tools/llvm-profdata/ |
  overflow-instr.test
    1   Tests for overflow when merging instrumented profiles.
    3   1- Merge profile having maximum counts with itself and verify overflow detected and saturation occu…
    4   RUN: llvm-profdata merge -instr %p/Inputs/overflow-instr.proftext %p/Inputs/overflow-instr.proftext…
    6   MERGE_OVERFLOW: {{.*}}.proftext: overflow: counter overflow
    10  2- Merge profile having maximum counts by itself and verify no overflow
    11  RUN: llvm-profdata merge -instr %p/Inputs/overflow-instr.proftext -o %t.out 2>&1 | FileCheck %s -ch…
    13  MERGE_NO_OVERFLOW-NOT: {{.*}}.proftext: overflow: counter overflow
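In other words (a summary of the test's own description, not text from it): adding a counter that is already at the 64-bit maximum to itself cannot be represented, so llvm-profdata saturates the merged count at the maximum value and emits the "counter overflow" diagnostic, while merging the same profile on its own performs no such addition and stays warning-free.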
/llvm-project/llvm/test/CodeGen/ARM/ |
  unschedule-first-call.ll
    20  %13 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
    22  %15 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %14, i64 1)
    27  %20 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %18, i64 %19)
    29  %22 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %21, i64 0)
    31  %24 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %23, i64 undef)
    33  %26 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %25, i64 0)
    41  %34 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 0, i64 undef)
    43  %36 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %35, i64 1)
    48  %41 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %39, i64 %40)
    50  %43 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %42, i64 0)
    [all …]
/llvm-project/llvm/test/CodeGen/X86/ |
  overflow-intrinsic-setcc-fold.ll
    11   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
    24   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
    37   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
    50   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
    63   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
    76   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
    89   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
    102  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
    115  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    128  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    [all …]
  xaluo.ll
    7    ; Get the actual value of the overflow bit.
    25   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
    47   %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
    69   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
    91   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
    114  %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 1)
    136  %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 1)
    158  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 1)
    180  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
    203  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 2, i64 %v1)
    [all …]
/llvm-project/llvm/test/Transforms/SCCP/ |
  with.overflow.ll
    4   declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
    5   declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8)
    6   declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8)
    7   declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8)
    8   declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8)
    9   declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8)
    10  declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)
    22  ; CHECK-NEXT: [[WO1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[V0_100]], i8 [[V0_1…
    24  ; CHECK-NEXT: [[WO2:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[V0_100]], i8 [[V0_1…
    27  ; CHECK-NEXT: [[WO3:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[V100_255]], i8 [[V0…
    [all …]
/llvm-project/llvm/test/Transforms/CorrelatedValuePropagation/ |
  overflow_predicate.ll
    5   declare {i8, i1} @llvm.uadd.with.overflow(i8, i8)
    6   declare {i8, i1} @llvm.sadd.with.overflow(i8, i8)
    7   declare {i8, i1} @llvm.usub.with.overflow(i8, i8)
    8   declare {i8, i1} @llvm.ssub.with.overflow(i8, i8)
    9   declare {i8, i1} @llvm.umul.with.overflow(i8, i8)
    10  declare {i8, i1} @llvm.smul.with.overflow(i8, i8)
    14  ; CHECK-NEXT: [[VAL_OV:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 100)
    27  %val_ov = call {i8, i1} @llvm.uadd.with.overflow(i8 %x, i8 100)
    46  ; CHECK-NEXT: [[VAL_OV:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 100)
    51  ; CHECK: overflow…
    [all …]