; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mattr=v7 | FileCheck %s --check-prefixes=CHECK

; Codegen tests for i32 llvm.bitreverse combined with bitwise logic ops
; (and/or/xor) on Thumb2. Positive cases have a single-use inner bitreverse;
; the multiuse and bswap cases are negative tests.

declare i16 @llvm.bswap.i16(i16) readnone
declare i32 @llvm.bswap.i32(i32) readnone
declare i32 @llvm.bitreverse.i32(i32) readnone

define i32 @brev_and_lhs_brev32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_and_lhs_brev32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    ands r0, r1
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %a)
  %2 = and i32 %1, %b
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  ret i32 %3
}

define i32 @brev_or_lhs_brev32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_or_lhs_brev32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    orrs r0, r1
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %a)
  %2 = or i32 %1, %b
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  ret i32 %3
}

define i32 @brev_xor_rhs_brev32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_xor_rhs_brev32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rbit r1, r1
; CHECK-NEXT:    eors r0, r1
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %b)
  %2 = xor i32 %a, %1
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  ret i32 %3
}

define i32 @brev_and_all_operand_multiuse(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_and_all_operand_multiuse:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rbit r1, r1
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    and.w r2, r0, r1
; CHECK-NEXT:    rbit r2, r2
; CHECK-NEXT:    muls r0, r2, r0
; CHECK-NEXT:    muls r0, r1, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %a)
  %2 = tail call i32 @llvm.bitreverse.i32(i32 %b)
  %3 = and i32 %1, %2
  %4 = tail call i32 @llvm.bitreverse.i32(i32 %3)
  %5 = mul i32 %1, %4 ;increase use of left bitreverse
  %6 = mul i32 %2, %5 ;increase use of right bitreverse

  ret i32 %6
}

; negative test
define i32 @brev_and_rhs_brev32_multiuse1(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_and_rhs_brev32_multiuse1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rbit r1, r1
; CHECK-NEXT:    ands r0, r1
; CHECK-NEXT:    rbit r1, r0
; CHECK-NEXT:    muls r0, r1, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %b)
  %2 = and i32 %1, %a
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  %4 = mul i32 %2, %3 ;increase use of logical op
  ret i32 %4
}

; negative test
define i32 @brev_and_rhs_brev32_multiuse2(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_and_rhs_brev32_multiuse2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rbit r1, r1
; CHECK-NEXT:    ands r0, r1
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    muls r0, r1, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %b)
  %2 = and i32 %1, %a
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  %4 = mul i32 %1, %3 ;increase use of inner bitreverse
  ret i32 %4
}

; negative test
define i32 @brev_xor_rhs_bs32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: brev_xor_rhs_bs32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rev r1, r1
; CHECK-NEXT:    eors r0, r1
; CHECK-NEXT:    rbit r0, r0
; CHECK-NEXT:    bx lr
  %1 = tail call i32 @llvm.bswap.i32(i32 %b)
  %2 = xor i32 %a, %1
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  ret i32 %3
}