; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+d --target-abi=ilp32d --verify-machineinstrs < %s \
; RUN:   | FileCheck --check-prefix=LA32 %s
; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d --verify-machineinstrs < %s \
; RUN:   | FileCheck --check-prefix=LA64 %s

@gd = external dso_local global double

;; The "f" constraint keeps the double operands in floating-point registers.
define double @constraint_f_double(double %a) nounwind {
; LA32-LABEL: constraint_f_double:
; LA32:       # %bb.0:
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(gd)
; LA32-NEXT:    fld.d $fa1, $a0, %pc_lo12(gd)
; LA32-NEXT:    #APP
; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
; LA32-NEXT:    #NO_APP
; LA32-NEXT:    ret
;
; LA64-LABEL: constraint_f_double:
; LA64:       # %bb.0:
; LA64-NEXT:    pcalau12i $a0, %pc_hi20(gd)
; LA64-NEXT:    fld.d $fa1, $a0, %pc_lo12(gd)
; LA64-NEXT:    #APP
; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
; LA64-NEXT:    #NO_APP
; LA64-NEXT:    ret
  %1 = load double, ptr @gd
  %2 = tail call double asm "fadd.d $0, $1, $2", "=f,f,f"(double %a, double %1)
  ret double %2
}

;; Explicit GPR constraints ({$r10}/{$r11}) force the double into general-purpose
;; registers: on LA64 through movfr2gr.d/movgr2fr.d, and on LA32 through a stack
;; spill that splits the value across a pair of 32-bit GPRs.
define double @constraint_gpr(double %a) {
; LA32-LABEL: constraint_gpr:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    fst.d $fa0, $sp, 8
; LA32-NEXT:    ld.w $a7, $sp, 8
; LA32-NEXT:    ld.w $t0, $sp, 12
; LA32-NEXT:    #APP
; LA32-NEXT:    move $a6, $a7
; LA32-NEXT:    #NO_APP
; LA32-NEXT:    st.w $a7, $sp, 4
; LA32-NEXT:    st.w $a6, $sp, 0
; LA32-NEXT:    fld.d $fa0, $sp, 0
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: constraint_gpr:
; LA64:       # %bb.0:
; LA64-NEXT:    .cfi_def_cfa_offset 0
; LA64-NEXT:    movfr2gr.d $a7, $fa0
; LA64-NEXT:    #APP
; LA64-NEXT:    move $a6, $a7
; LA64-NEXT:    #NO_APP
; LA64-NEXT:    movgr2fr.d $fa0, $a6
; LA64-NEXT:    ret
  %1 = tail call double asm sideeffect alignstack "move $0, $1", "={$r10},{$r11}"(double %a)
  ret double %1
}