; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; InstCombine tests for icmp eq/ne folds involving xor:
;   (a ^ C) ==/!= b   -->  (a ^ b) ==/!= C
;   (a ^ C) ==/!= (b ^ C)  -->  a ==/!= b
; plus sign-bit and multi-use variants.

declare void @use(i32)

; xor-by-constant compared to a variable: constant moves to the icmp RHS.
define i1 @cmpeq_xor_cst1(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst1(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 10
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, 10
  %cmp = icmp eq i32 %c, %b
  ret i1 %cmp
}

; Already-canonical form: xor of two variables compared to a constant.
define i1 @cmpeq_xor_cst2(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst2(
; CHECK-NEXT:    [[C:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[C]], 10
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, %b
  %cmp = icmp eq i32 %c, 10
  ret i1 %cmp
}

; Same constant xor'd on both sides cancels entirely.
define i1 @cmpeq_xor_cst3(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, 10
  %d = xor i32 %b, 10
  %cmp = icmp eq i32 %c, %d
  ret i1 %cmp
}

; ne variants of the three folds above.
define i1 @cmpne_xor_cst1(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpne_xor_cst1(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP1]], 10
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, 10
  %cmp = icmp ne i32 %c, %b
  ret i1 %cmp
}

define i1 @cmpne_xor_cst2(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpne_xor_cst2(
; CHECK-NEXT:    [[C:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[C]], 10
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, %b
  %cmp = icmp ne i32 %c, 10
  ret i1 %cmp
}

define i1 @cmpne_xor_cst3(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpne_xor_cst3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, 10
  %d = xor i32 %b, 10
  %cmp = icmp ne i32 %c, %d
  ret i1 %cmp
}

; Extra use of the xor keeps it in place; the compare uses the original value.
define i1 @cmpeq_xor_cst1_multiuse(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst1_multiuse(
; CHECK-NEXT:    [[C:%.*]] = xor i32 [[A:%.*]], 10
; CHECK-NEXT:    call void @use(i32 [[C]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[C]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %c = xor i32 %a, 10
  call void @use(i32 %c)
  %cmp = icmp eq i32 %c, %b
  ret i1 %cmp
}

; Commuted operand order on the icmp still folds.
define i1 @cmpeq_xor_cst1_commuted(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst1_commuted(
; CHECK-NEXT:    [[B2:%.*]] = mul i32 [[B:%.*]], [[B]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B2]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 10
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %b2 = mul i32 %b, %b ; thwart complexity-based canonicalization
  %c = xor i32 %a, 10
  %cmp = icmp eq i32 %b2, %c
  ret i1 %cmp
}

; Vector version with a non-splat constant.
define <2 x i1> @cmpeq_xor_cst1_vec(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: @cmpeq_xor_cst1_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], <i32 10, i32 11>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %c = xor <2 x i32> %a, <i32 10, i32 11>
  %cmp = icmp eq <2 x i32> %b, %c
  ret <2 x i1> %cmp
}

; tests from PR65968
; Sign-bit equality of x and ~y reduces to a single slt-zero test.
define i1 @foo1(i32 %x, i32 %y) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT:    [[NEG1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[NEG1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %x, -2147483648
  %neg = xor i32 %y, -1
  %and1 = and i32 %neg, -2147483648
  %cmp = icmp eq i32 %and, %and1
  ret i1 %cmp
}

define i1 @foo2(i32 %x, i32 %y) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:    [[NEG1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[NEG1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %and = and i32 %x, -2147483648
  %neg = and i32 %y, -2147483648
  %and1 = xor i32 %neg, -2147483648
  %cmp = icmp eq i32 %and, %and1
  ret i1 %cmp
}

; tests from PR67783
; Non-splat vector xor-by-constant vs. constant is NOT combined here.
define <2 x i1> @foo3(<2 x i8> %x) {
; CHECK-LABEL: @foo3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], <i8 -2, i8 -1>
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i8> [[XOR]], <i8 9, i8 79>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
entry:
  %xor = xor <2 x i8> %x, <i8 -2, i8 -1>
  %cmp = icmp ne <2 x i8> %xor, <i8 9, i8 79>
  ret <2 x i1> %cmp
}

declare void @use.i8(i8)

; (x ^ y) == 0 folds to x == y even with an extra use of the xor.
define i1 @fold_xorC_eq0_multiuse(i8 %x, i8 %y) {
; CHECK-LABEL: @fold_xorC_eq0_multiuse(
; CHECK-NEXT:    [[XX:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X]], [[Y]]
; CHECK-NEXT:    call void @use.i8(i8 [[XX]])
; CHECK-NEXT:    ret i1 [[R]]
;
  %xx = xor i8 %x, %y
  %r = icmp eq i8 %xx, 0
  call void @use.i8(i8 %xx)
  ret i1 %r
}

; Negative test: nonzero RHS with a multi-use xor must not fold.
define i1 @fold_xorC_eq1_multiuse_fail(i8 %x, i8 %y) {
; CHECK-LABEL: @fold_xorC_eq1_multiuse_fail(
; CHECK-NEXT:    [[XX:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[XX]], 1
; CHECK-NEXT:    call void @use.i8(i8 [[XX]])
; CHECK-NEXT:    ret i1 [[R]]
;
  %xx = xor i8 %x, %y
  %r = icmp eq i8 %xx, 1
  call void @use.i8(i8 %xx)
  ret i1 %r
}

; (x ^ 45) != 67 folds to x != (45 ^ 67) == 110; extra use keeps the xor alive.
define i1 @fold_xorC_neC_multiuse(i8 %x) {
; CHECK-LABEL: @fold_xorC_neC_multiuse(
; CHECK-NEXT:    [[XX:%.*]] = xor i8 [[X:%.*]], 45
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X]], 110
; CHECK-NEXT:    call void @use.i8(i8 [[XX]])
; CHECK-NEXT:    ret i1 [[R]]
;
  %xx = xor i8 %x, 45
  %r = icmp ne i8 %xx, 67
  call void @use.i8(i8 %xx)
  ret i1 %r
}