xref: /llvm-project/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll (revision 1897bf61f0bc85c8637997d0f2aa7d94d375d787)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d < %s \
; RUN:   | FileCheck %s

;; This file contains specific tests for the lp64d ABI.

;; Check passing floating-point arguments with FPRs.

;; Callee mixing a GPR argument (i64 in $a0) with FP arguments; under lp64d
;; the float lands in $fa0 and the double in $fa1, as the movfr2gr reads show.
define i64 @callee_float_in_fpr(i64 %a, float %b, double %c) nounwind {
; CHECK-LABEL: callee_float_in_fpr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ftintrz.l.s $fa0, $fa0
; CHECK-NEXT:    movfr2gr.d $a1, $fa0
; CHECK-NEXT:    ftintrz.l.d $fa0, $fa1
; CHECK-NEXT:    movfr2gr.d $a2, $fa0
; CHECK-NEXT:    add.d $a0, $a0, $a1
; CHECK-NEXT:    add.d $a0, $a0, $a2
; CHECK-NEXT:    ret
  %b_fptosi = fptosi float %b to i64
  %c_fptosi = fptosi double %c to i64
  %1 = add i64 %a, %b_fptosi
  %2 = add i64 %1, %c_fptosi
  ret i64 %2
}
;; Caller side of the test above: the constant float/double are materialized
;; into $fa0/$fa1 and the i64 into $a0 before the call.
define i64 @caller_float_in_fpr() nounwind {
; CHECK-LABEL: caller_float_in_fpr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi.d $sp, $sp, -16
; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; CHECK-NEXT:    movgr2fr.w $fa0, $zero
; CHECK-NEXT:    movgr2fr.d $fa1, $zero
; CHECK-NEXT:    ori $a0, $zero, 1
; CHECK-NEXT:    bl %plt(callee_float_in_fpr)
; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; CHECK-NEXT:    addi.d $sp, $sp, 16
; CHECK-NEXT:    ret
  %1 = call i64 @callee_float_in_fpr(i64 1, float 0.0, double 0.0)
  ret i64 %1
}

;; Check that the GPR is used once the FPRs are exhausted.

;; Must keep define on a single line due to an update_llc_test_checks.py limitation.
;; Nine double arguments: the first eight occupy $fa0-$fa7, so the ninth (%i)
;; arrives in GPR $a0 and is moved back into an FPR before conversion.
define i64 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) nounwind {
; CHECK-LABEL: callee_double_in_gpr_exhausted_fprs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movgr2fr.d $fa0, $a0
; CHECK-NEXT:    ftintrz.l.d $fa1, $fa7
; CHECK-NEXT:    movfr2gr.d $a0, $fa1
; CHECK-NEXT:    ftintrz.l.d $fa0, $fa0
; CHECK-NEXT:    movfr2gr.d $a1, $fa0
; CHECK-NEXT:    add.d $a0, $a0, $a1
; CHECK-NEXT:    ret
  %h_fptosi = fptosi double %h to i64
  %i_fptosi = fptosi double %i to i64
  %1 = add i64 %h_fptosi, %i_fptosi
  ret i64 %1
}
;; Caller side: doubles 1.0-8.0 go to $fa0-$fa7 (materialized via vldi),
;; while 9.0 (bit pattern built with lu32i.d/lu52i.d) is passed in $a0.
define i64 @caller_double_in_gpr_exhausted_fprs() nounwind {
; CHECK-LABEL: caller_double_in_gpr_exhausted_fprs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi.d $sp, $sp, -16
; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; CHECK-NEXT:    ori $a0, $zero, 0
; CHECK-NEXT:    lu32i.d $a0, 131072
; CHECK-NEXT:    lu52i.d $a0, $a0, 1026
; CHECK-NEXT:    vldi $vr0, -912
; CHECK-NEXT:    vldi $vr1, -1024
; CHECK-NEXT:    vldi $vr2, -1016
; CHECK-NEXT:    vldi $vr3, -1008
; CHECK-NEXT:    vldi $vr4, -1004
; CHECK-NEXT:    vldi $vr5, -1000
; CHECK-NEXT:    vldi $vr6, -996
; CHECK-NEXT:    vldi $vr7, -992
; CHECK-NEXT:    bl %plt(callee_double_in_gpr_exhausted_fprs)
; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; CHECK-NEXT:    addi.d $sp, $sp, 16
; CHECK-NEXT:    ret
  %1 = call i64 @callee_double_in_gpr_exhausted_fprs(
      double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
      double 7.0, double 8.0, double 9.0)
  ret i64 %1
}

;; Check returning doubles.

;; A double return value comes back in $fa0 under lp64d.
define double @callee_double_ret() nounwind {
; CHECK-LABEL: callee_double_ret:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vldi $vr0, -912
; CHECK-NEXT:    ret
  ret double 1.0
}
;; Caller side: the returned double is read from $fa0 (movfr2gr.d) to satisfy
;; the bitcast to i64.
define i64 @caller_double_ret() nounwind {
; CHECK-LABEL: caller_double_ret:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi.d $sp, $sp, -16
; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; CHECK-NEXT:    bl %plt(callee_double_ret)
; CHECK-NEXT:    movfr2gr.d $a0, $fa0
; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; CHECK-NEXT:    addi.d $sp, $sp, 16
; CHECK-NEXT:    ret
  %1 = call double @callee_double_ret()
  %2 = bitcast double %1 to i64
  ret i64 %2
}