; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; Tests for the InstCombine fold:
;   sadd.with.overflow(add nsw X, C1, C2) -> sadd.with.overflow(X, C1 + C2)
; which is valid only when the inner add is nsw and C1 + C2 does not
; itself overflow the operand type.

declare { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)

declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)

declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8)

; 7 + 13 folds into a single constant 20.
define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %a = add nsw i32 %x, 7
  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 13)
  ret { i32, i1 } %b
}

; Mixed-sign constants still fold: 13 + (-7) = 6.
define { i32, i1 } @fold_mixed_signs(i32 %x) {
; CHECK-LABEL: @fold_mixed_signs(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 6)
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %a = add nsw i32 %x, 13
  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 -7)
  ret { i32, i1 } %b
}

; 100 + 27 = 127 is exactly i8 signed max, so the fold is still legal.
define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[X:%.*]], i8 127)
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %a = add nsw i8 %x, 100
  %b = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 27)
  ret { i8, i1 } %b
}

; 100 + 28 = 128 overflows i8, so no fold may happen.
define { i8, i1 } @no_fold_on_constant_add_overflow(i8 %x) {
; CHECK-LABEL: @no_fold_on_constant_add_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nsw i8 [[X:%.*]], 100
; CHECK-NEXT:    [[B:%.*]] = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A]], i8 28)
; CHECK-NEXT:    ret { i8, i1 } [[B]]
;
  %a = add nsw i8 %x, 100
  %b = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 28)
  ret { i8, i1 } %b
}

; Splat vector constants fold elementwise: 12 + 30 = 42 per lane.
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[TMP1]]
;
  %a = add nsw <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

; An undef lane in the inner constant blocks the fold.
define { <2 x i32>, <2 x i1> } @no_fold_splat_undef_constant(<2 x i32> %x) {
; CHECK-LABEL: @no_fold_splat_undef_constant(
; CHECK-NEXT:    [[A:%.*]] = add nsw <2 x i32> [[X:%.*]], <i32 12, i32 undef>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nsw <2 x i32> %x, <i32 12, i32 undef>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

; A non-constant inner operand blocks the fold.
define { <2 x i32>, <2 x i1> } @no_fold_splat_not_constant(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @no_fold_splat_not_constant(
; CHECK-NEXT:    [[A:%.*]] = add nsw <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nsw <2 x i32> %x, %y
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

; nuw in addition to nsw does not block the fold.
define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %a = add nuw nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}

; nuw alone (without nsw) is not sufficient — no fold.
define { i32, i1 } @no_fold_nuw(i32 %x) {
; CHECK-LABEL: @no_fold_nuw(
; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}

; A plain (wrapping) add has no nsw, so no fold; the call's commuted
; operands only get canonicalized (constant moved to the RHS).
define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
; CHECK-LABEL: @no_fold_wrapped_add(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add i32 %x, 12
  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 30, i32 %a)
  ret { i32, i1 } %b
}

; sub nsw of a negative constant folds like the equivalent add: 12 + 30 = 42.
define { i32, i1 } @fold_sub_simple(i32 %x) {
; CHECK-LABEL: @fold_sub_simple(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %a = sub nsw i32 %x, -12
  %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}