; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+xmipscmove -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I-CCMOV %s

;; There are a few different ways to lower (select (and A, B), X, Y). This test
;; ensures that we do so with as few branches as possible.

;; Select on (and A, B): base ISAs lower this to a single conditional branch;
;; with +xmipscmove the select becomes a branchless mips.ccmov.
define signext i32 @select_of_and(i1 zeroext %a, i1 zeroext %b, i32 signext %c, i32 signext %d) nounwind {
; RV32I-LABEL: select_of_and:
; RV32I:       # %bb.0:
; RV32I-NEXT:    and a1, a0, a1
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    bnez a1, .LBB0_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:  .LBB0_2:
; RV32I-NEXT:    ret
;
; RV64I-LABEL: select_of_and:
; RV64I:       # %bb.0:
; RV64I-NEXT:    and a1, a0, a1
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:    bnez a1, .LBB0_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a3
; RV64I-NEXT:  .LBB0_2:
; RV64I-NEXT:    ret
;
; RV64I-CCMOV-LABEL: select_of_and:
; RV64I-CCMOV:       # %bb.0:
; RV64I-CCMOV-NEXT:    and a0, a0, a1
; RV64I-CCMOV-NEXT:    mips.ccmov a0, a0, a2, a3
; RV64I-CCMOV-NEXT:    ret
  %1 = and i1 %a, %b
  %2 = select i1 %1, i32 %c, i32 %d
  ret i32 %2
}

declare signext i32 @both() nounwind
declare signext i32 @neither() nounwind

;; Branch on (and A, B): both conditions should short-circuit to the same
;; else-block, so only two branches are emitted on all configurations.
define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV32I-LABEL: if_of_and:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    beqz a0, .LBB1_3
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    beqz a1, .LBB1_3
; RV32I-NEXT:  # %bb.2: # %if.then
; RV32I-NEXT:    call both
; RV32I-NEXT:    j .LBB1_4
; RV32I-NEXT:  .LBB1_3: # %if.else
; RV32I-NEXT:    call neither
; RV32I-NEXT:  .LBB1_4: # %if.end
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: if_of_and:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    beqz a0, .LBB1_3
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    beqz a1, .LBB1_3
; RV64I-NEXT:  # %bb.2: # %if.then
; RV64I-NEXT:    call both
; RV64I-NEXT:    j .LBB1_4
; RV64I-NEXT:  .LBB1_3: # %if.else
; RV64I-NEXT:    call neither
; RV64I-NEXT:  .LBB1_4: # %if.end
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64I-CCMOV-LABEL: if_of_and:
; RV64I-CCMOV:       # %bb.0:
; RV64I-CCMOV-NEXT:    addi sp, sp, -16
; RV64I-CCMOV-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-CCMOV-NEXT:    beqz a0, .LBB1_3
; RV64I-CCMOV-NEXT:  # %bb.1:
; RV64I-CCMOV-NEXT:    beqz a1, .LBB1_3
; RV64I-CCMOV-NEXT:  # %bb.2: # %if.then
; RV64I-CCMOV-NEXT:    call both
; RV64I-CCMOV-NEXT:    j .LBB1_4
; RV64I-CCMOV-NEXT:  .LBB1_3: # %if.else
; RV64I-CCMOV-NEXT:    call neither
; RV64I-CCMOV-NEXT:  .LBB1_4: # %if.end
; RV64I-CCMOV-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-CCMOV-NEXT:    addi sp, sp, 16
; RV64I-CCMOV-NEXT:    ret
  %1 = and i1 %a, %b
  br i1 %1, label %if.then, label %if.else

if.then:
  %2 = tail call i32 @both()
  br label %if.end

if.else:
  %3 = tail call i32 @neither()
  br label %if.end

if.end:
  %4 = phi i32 [%2, %if.then], [%3, %if.else]
  ret i32 %4
}