; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)

declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)

declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)

define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
; CHECK-NEXT:    [[B:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw i32 %x, 7
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
  ret { i32, i1 } %b
}

define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
; CHECK-NEXT:    [[B:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 -1)
; CHECK-NEXT:    ret { i8, i1 } [[B]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
  ret { i8, i1 } %b
}

define { i8, i1 } @no_fold_on_constant_add_overflow(i8 %x) {
; CHECK-LABEL: @no_fold_on_constant_add_overflow(
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[X:%.*]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 56)
  ret { i8, i1 } %b
}

define { <2 x i8>, <2 x i1> } @no_fold_vector_no_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_no_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -57, i8 -56>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> splat (i8 55))
; CHECK-NEXT:    ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 199, i8 200>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}

define { <2 x i8>, <2 x i1> } @no_fold_vector_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -56, i8 -55>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> splat (i8 55))
; CHECK-NEXT:    ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 200, i8 201>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}

define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
; CHECK-NEXT:    [[B:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> splat (i32 42))
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

define { <2 x i32>, <2 x i1> } @no_fold_splat_undef_constant(<2 x i32> %x) {
; CHECK-LABEL: @no_fold_splat_undef_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 undef>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> splat (i32 30))
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 undef>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

define { <2 x i32>, <2 x i1> } @no_fold_splat_not_constant(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @no_fold_splat_not_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> splat (i32 30))
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, %y
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
; CHECK-NEXT:    [[B:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}

define { i32, i1 } @no_fold_nsw(i32 %x) {
; CHECK-LABEL: @no_fold_nsw(
; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}

define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
; CHECK-LABEL: @no_fold_wrapped_add(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 30, i32 %a)
  ret { i32, i1 } %b
}


define { <2 x i32>, <2 x i1> } @fold_simple_splat_with_disjoint_or_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_with_disjoint_or_constant(
; CHECK-NEXT:    [[B:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> splat (i32 42))
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = or disjoint <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}


define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant_with_or_fail(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant_with_or_fail(
; CHECK-NEXT:    [[A:%.*]] = or <2 x i32> [[X:%.*]], splat (i32 12)
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> splat (i32 30))
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = or <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}