; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -passes=instsimplify -S | FileCheck %s

; Here we add two unsigned values, check that the addition did not underflow AND
; that the result is non-zero. This can be simplified just to a comparison
; between the base and negated offset.

declare void @use8(i8)

declare void @use1(i1)
declare void @llvm.assume(i1)

; If we are checking that the result is not null or no underflow happened,
; it is tautological (always-true).
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 true
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t2_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 true
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; If we are checking that the result is null and underflow happened,
; it is tautological (always-false).
define i1 @t3(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 false
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp uge i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t4_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 false
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp ule i8 %base, %adjusted
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; We only need to know that any of the 'add' operands is non-zero,
; not necessarily the one used in the comparison.
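; (If %adjusted is zero, %no_underflow degenerates to '0 u< %base', i.e. to
; %base being non-zero; so %r can only be false when %adjusted and %base are
; both zero, which in turn forces %offset to be zero. The CHECK lines below
; show that instsimplify currently leaves this variant, where the assume
; covers %offset instead of %base, unsimplified.)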
define i1 @t5(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}