; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; With left shift, the comparison should not be modified.
define i1 @test_shift_and_cmp_not_changed1(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_not_changed1(
; CHECK-NEXT:    [[SHLP:%.*]] = shl i8 [[P:%.*]], 5
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[SHLP]], 64
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}

; With arithmetic right shift, the masked value can only be -64 or 0, both of
; which are slt 32, so the whole function folds to 'ret i1 true' (the CHECK
; lines below reflect that fold).
define i1 @test_shift_and_cmp_not_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_not_changed2(
; CHECK-NEXT:    ret i1 true
;
  %shlp = ashr i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}

; This should simplify functionally to the left shift case.
; The extra input parameter should be optimized away.
define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1(
; CHECK-NEXT:    [[ANDP:%.*]] = shl i8 [[P:%.*]], 5
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 33
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %andp = and i8 %p, 6
  %andq = and i8 %q, 8
  %or = or i8 %andq, %andp
  %shl = shl i8 %or, 5
  %ashr = ashr i8 %shl, 5
  %cmp = icmp slt i8 %ashr, 1
  ret i1 %cmp
}

; Vector splat version of the test above; same fold is expected.
define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
; CHECK-NEXT:    [[ANDP:%.*]] = shl <2 x i8> [[P:%.*]], splat (i8 5)
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[ANDP]], splat (i8 33)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %andp = and <2 x i8> %p, <i8 6, i8 6>
  %andq = and <2 x i8> %q, <i8 8, i8 8>
  %or = or <2 x i8> %andq, %andp
  %shl = shl <2 x i8> %or, <i8 5, i8 5>
  %ashr = ashr <2 x i8> %shl, <i8 5, i8 5>
  %cmp = icmp slt <2 x i8> %ashr, <i8 1, i8 1>
  ret <2 x i1> %cmp
}

; Unsigned compare allows a transformation to compare against 0.
define i1 @test_shift_and_cmp_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[P:%.*]], 6
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp ult i8 %andp, 32
  ret i1 %cmp
}

; Vector splat version of the test above; same fold is expected.
define <2 x i1> @test_shift_and_cmp_changed2_vec(<2 x i8> %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[P:%.*]], splat (i8 6)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %shlp = shl <2 x i8> %p, <i8 5, i8 5>
  %andp = and <2 x i8> %shlp, <i8 -64, i8 -64>
  %cmp = icmp ult <2 x i8> %andp, <i8 32, i8 32>
  ret <2 x i1> %cmp
}

; nsw on the shift should not affect the comparison.
define i1 @test_shift_and_cmp_changed3(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[P:%.*]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl nsw i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}

; Logical shift right allows a return true because the 'and' guarantees no bits are set.
define i1 @test_shift_and_cmp_changed4(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed4(
; CHECK-NEXT:    ret i1 true
;
  %shlp = lshr i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}