; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s

define double @func(double %d, i32 %n) nounwind {
; RV32IFD-LABEL: func:
; RV32IFD:       # %bb.0: # %entry
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw a0, 16(sp)
; RV32IFD-NEXT:    sw a1, 20(sp)
; RV32IFD-NEXT:    fld fa5, 16(sp)
; RV32IFD-NEXT:    beqz a2, .LBB0_2
; RV32IFD-NEXT:  # %bb.1: # %if.else
; RV32IFD-NEXT:    addi a2, a2, -1
; RV32IFD-NEXT:    fsd fa5, 16(sp)
; RV32IFD-NEXT:    lw a0, 16(sp)
; RV32IFD-NEXT:    lw a1, 20(sp)
; RV32IFD-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call func
; RV32IFD-NEXT:    sw a0, 16(sp)
; RV32IFD-NEXT:    sw a1, 20(sp)
; RV32IFD-NEXT:    fld fa5, 16(sp)
; RV32IFD-NEXT:    fld fa4, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fadd.d fa5, fa5, fa4
; RV32IFD-NEXT:  .LBB0_2: # %return
; RV32IFD-NEXT:    fsd fa5, 16(sp)
; RV32IFD-NEXT:    lw a0, 16(sp)
; RV32IFD-NEXT:    lw a1, 20(sp)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: func:
; RV64IFD:       # %bb.0: # %entry
; RV64IFD-NEXT:    sext.w a2, a1
; RV64IFD-NEXT:    fmv.d.x fa5, a0
; RV64IFD-NEXT:    beqz a2, .LBB0_2
; RV64IFD-NEXT:  # %bb.1: # %if.else
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    addiw a1, a1, -1
; RV64IFD-NEXT:    fmv.x.d a0, fa5
; RV64IFD-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call func
; RV64IFD-NEXT:    fmv.d.x fa5, a0
; RV64IFD-NEXT:    fld fa4, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fadd.d fa5, fa5, fa4
; RV64IFD-NEXT:    fmv.x.d a0, fa5
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
; RV64IFD-NEXT:  .LBB0_2: # %return
; RV64IFD-NEXT:    fmv.x.d a0, fa5
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: func:
; RV32IZFINXZDINX:       # %bb.0: # %entry
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    beqz a2, .LBB0_2
; RV32IZFINXZDINX-NEXT:  # %bb.1: # %if.else
; RV32IZFINXZDINX-NEXT:    addi a2, a2, -1
; RV32IZFINXZDINX-NEXT:    mv a0, s0
; RV32IZFINXZDINX-NEXT:    mv a1, s1
; RV32IZFINXZDINX-NEXT:    call func
; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
; RV32IZFINXZDINX-NEXT:    j .LBB0_3
; RV32IZFINXZDINX-NEXT:  .LBB0_2: # %return
; RV32IZFINXZDINX-NEXT:    mv a0, s0
; RV32IZFINXZDINX-NEXT:    mv a1, s1
; RV32IZFINXZDINX-NEXT:  .LBB0_3: # %return
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: func:
; RV64IZFINXZDINX:       # %bb.0: # %entry
; RV64IZFINXZDINX-NEXT:    sext.w a2, a1
; RV64IZFINXZDINX-NEXT:    beqz a2, .LBB0_2
; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    addiw a1, a1, -1
; RV64IZFINXZDINX-NEXT:    mv s0, a0
; RV64IZFINXZDINX-NEXT:    call func
; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:  .LBB0_2: # %return
; RV64IZFINXZDINX-NEXT:    ret
entry:
  %cmp = icmp eq i32 %n, 0
  br i1 %cmp, label %return, label %if.else

if.else:
  %sub = add i32 %n, -1
  %call = tail call double @func(double %d, i32 %sub)
  %add = fadd double %call, %d
  ret double %add

return:
  ret double %d
}