; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

declare void @use(i32)

; (63 - (x & 31)) cannot borrow out of any set bit of the mask, so
; instcombine folds the sub into a bit-flip: (x & 31) ^ 63.
define i32 @low_mask_nsw_nuw(i32 %x) {
; CHECK-LABEL: @low_mask_nsw_nuw(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 31
; CHECK-NEXT:    [[SUB:%.*]] = xor i32 [[AND]], 63
; CHECK-NEXT:    ret i32 [[SUB]]
;
  %and = and i32 %x, 31
  %sub = sub i32 63, %and
  ret i32 %sub
}

; Same sub->xor fold as above, but with splat vector operands.
define <2 x i32> @low_mask_nsw_nuw_vec(<2 x i32> %x) {
; CHECK-LABEL: @low_mask_nsw_nuw_vec(
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X:%.*]], splat (i32 31)
; CHECK-NEXT:    [[SUB:%.*]] = xor <2 x i32> [[AND]], splat (i32 63)
; CHECK-NEXT:    ret <2 x i32> [[SUB]]
;
  %and = and <2 x i32> %x, <i32 31, i32 31>
  %sub = sub <2 x i32> <i32 63, i32 63>, %and
  ret <2 x i32> %sub
}

; Arbitrary (non-low-bit) mask: the sub is not turned into xor, but
; known-bits analysis lets instcombine infer the nuw/nsw flags.
define i8 @arbitrary_mask_sub_i8(i8 %x) {
; CHECK-LABEL: @arbitrary_mask_sub_i8(
; CHECK-NEXT:    [[A:%.*]] = and i8 [[X:%.*]], 10
; CHECK-NEXT:    [[M:%.*]] = sub nuw nsw i8 11, [[A]]
; CHECK-NEXT:    ret i8 [[M]]
;
  %a = and i8 %x, 10 ; 0b00001010
  %m = sub i8 11, %a ; 0b00001011
  ret i8 %m
}

; TODO: Borrow from the MSB is ok.

; Mask includes the sign bit; a borrow can occur, so the sub is kept
; (no xor fold). The 163 mask is printed back as -93 (i8 is signed in asm).
define i8 @arbitrary_mask_sub_high_bit_dont_care_i8(i8 %x) {
; CHECK-LABEL: @arbitrary_mask_sub_high_bit_dont_care_i8(
; CHECK-NEXT:    [[MASKX:%.*]] = and i8 [[X:%.*]], -93
; CHECK-NEXT:    [[S:%.*]] = sub i8 39, [[MASKX]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %maskx = and i8 %x, 163 ; 0b10100011
  %s = sub i8 39, %maskx ; 0b00100111
  ret i8 %s
}

; Same as above with an explicit nsw flag; the flag is preserved and the
; sub is still not folded.
define i8 @arbitrary_mask_sub_nsw_high_bit_dont_care_i8(i8 %x) {
; CHECK-LABEL: @arbitrary_mask_sub_nsw_high_bit_dont_care_i8(
; CHECK-NEXT:    [[MASKX:%.*]] = and i8 [[X:%.*]], -93
; CHECK-NEXT:    [[S:%.*]] = sub nsw i8 39, [[MASKX]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %maskx = and i8 %x, 163 ; 0b10100011
  %s = sub nsw i8 39, %maskx ; 0b00100111
  ret i8 %s
}

; Same as above with an explicit nuw flag; also preserved, also not folded.
define i8 @arbitrary_mask_sub_nuw_high_bit_dont_care_i8(i8 %x) {
; CHECK-LABEL: @arbitrary_mask_sub_nuw_high_bit_dont_care_i8(
; CHECK-NEXT:    [[MASKX:%.*]] = and i8 [[X:%.*]], -93
; CHECK-NEXT:    [[S:%.*]] = sub nuw i8 39, [[MASKX]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %maskx = and i8 %x, 163 ; 0b10100011
  %s = sub nuw i8 39, %maskx ; 0b00100111
  ret i8 %s
}

; Vector variant with an illegal (non-power-of-2) element type; the sub
; survives but gains nuw/nsw, and constants are canonicalized to splats.
define <2 x i5> @arbitrary_mask_sub_v2i5(<2 x i5> %x) {
; CHECK-LABEL: @arbitrary_mask_sub_v2i5(
; CHECK-NEXT:    [[A:%.*]] = and <2 x i5> [[X:%.*]], splat (i5 -8)
; CHECK-NEXT:    [[M:%.*]] = sub nuw nsw <2 x i5> splat (i5 -6), [[A]]
; CHECK-NEXT:    ret <2 x i5> [[M]]
;
  %a = and <2 x i5> %x, <i5 24, i5 24> ; 0b11000
  %m = sub <2 x i5> <i5 26, i5 26>, %a ; 0b11010
  ret <2 x i5> %m
}

; Mask bits are not a subset of the subtrahend's bits, so no xor fold;
; only the nuw/nsw flags are inferred.
define i8 @not_masked_sub_i8(i8 %x) {
; CHECK-LABEL: @not_masked_sub_i8(
; CHECK-NEXT:    [[A:%.*]] = and i8 [[X:%.*]], 7
; CHECK-NEXT:    [[M:%.*]] = sub nuw nsw i8 11, [[A]]
; CHECK-NEXT:    ret i8 [[M]]
;
  %a = and i8 %x, 7 ; 0b00000111
  %m = sub i8 11, %a ; 0b00001011
  ret i8 %m
}

declare i32 @llvm.ctlz.i32(i32, i1)

; The value range implied by ctlz (annotated range(i32 0, 33) in the
; output) is enough to prove 31 - count has no borrow, enabling the
; sub -> xor fold.
define i32 @range_masked_sub(i32 %x) {
; CHECK-LABEL: @range_masked_sub(
; CHECK-NEXT:    [[COUNT:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 true) #[[ATTR1:[0-9]+]]
; CHECK-NEXT:    [[SUB:%.*]] = xor i32 [[COUNT]], 31
; CHECK-NEXT:    ret i32 [[SUB]]
;
  %count = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true) nounwind readnone
  %sub = sub i32 31, %count
  ret i32 %sub
}

; ((x & 31) ^ 31) + 42 combines to 73 - (x & 31): the xor is a masked
; negation that folds into the add's constant.
define i32 @xor_add(i32 %x) {
; CHECK-LABEL: @xor_add(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 31
; CHECK-NEXT:    [[ADD:%.*]] = sub nuw nsw i32 73, [[AND]]
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %and = and i32 %x, 31
  %xor = xor i32 %and, 31
  %add = add i32 %xor, 42
  ret i32 %add
}

; Same fold as @xor_add, but the xor has another user; it is kept for
; @use while the add is still rewritten to a sub.
define i32 @xor_add_extra_use(i32 %x) {
; CHECK-LABEL: @xor_add_extra_use(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 31
; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 31
; CHECK-NEXT:    call void @use(i32 [[XOR]])
; CHECK-NEXT:    [[ADD:%.*]] = sub nuw nsw i32 73, [[AND]]
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %and = and i32 %x, 31
  %xor = xor i32 %and, 31
  call void @use(i32 %xor)
  %add = add i32 %xor, 42
  ret i32 %add
}

; Splat-vector version of the xor+add -> sub fold (63 + 42 = 105).
define <2 x i8> @xor_add_splat(<2 x i8> %x) {
; CHECK-LABEL: @xor_add_splat(
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i8> [[X:%.*]], splat (i8 24)
; CHECK-NEXT:    [[ADD:%.*]] = sub nuw nsw <2 x i8> splat (i8 105), [[AND]]
; CHECK-NEXT:    ret <2 x i8> [[ADD]]
;
  %and = and <2 x i8> %x, <i8 24, i8 24>
  %xor = xor <2 x i8> %and, <i8 63, i8 63>
  %add = add <2 x i8> %xor, <i8 42, i8 42>
  ret <2 x i8> %add
}

; Negative test: an undef element in the xor constant blocks the fold,
; so the xor+add sequence is left in place.
define <2 x i8> @xor_add_splat_undef(<2 x i8> %x) {
; CHECK-LABEL: @xor_add_splat_undef(
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i8> [[X:%.*]], splat (i8 24)
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[AND]], <i8 63, i8 undef>
; CHECK-NEXT:    [[ADD:%.*]] = add <2 x i8> [[XOR]], splat (i8 42)
; CHECK-NEXT:    ret <2 x i8> [[ADD]]
;
  %and = and <2 x i8> %x, <i8 24, i8 24>
  %xor = xor <2 x i8> %and, <i8 63, i8 undef>
  %add = add <2 x i8> %xor, <i8 42, i8 42>
  ret <2 x i8> %add
}

; Make sure we don't convert sub to xor using dominating condition. That makes
; it hard for other passes to reverse.
define i32 @xor_dominating_cond(i32 %x) {
; CHECK-LABEL: @xor_dominating_cond(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[X:%.*]], 256
; CHECK-NEXT:    br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    [[A:%.*]] = sub nuw nsw i32 255, [[X]]
; CHECK-NEXT:    ret i32 [[A]]
; CHECK:       if.end:
; CHECK-NEXT:    ret i32 [[X]]
;
entry:
  %cond = icmp ult i32 %x, 256
  br i1 %cond, label %if.then, label %if.end

if.then:
  %a = sub i32 255, %x
  ret i32 %a

if.end:
  ret i32 %x
}