; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64

;; Check that the $fcc* register is spilled before a function call and then
;; reloaded. The condition flag registers are not preserved across calls, so
;; a live $fcc value must be saved and restored around the call to @foo.
declare void @foo()

define i1 @load_store_fcc_reg(float %a, i1 %c) {
; LA32-LABEL: load_store_fcc_reg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    .cfi_offset 56, -16
; LA32-NEXT:    move $fp, $a0
; LA32-NEXT:    fmov.s $fs0, $fa0
; LA32-NEXT:    bl %plt(foo)
; LA32-NEXT:    movgr2fr.w $fa0, $zero
; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fs0
; LA32-NEXT:    bcnez $fcc0, .LBB0_2
; LA32-NEXT:  # %bb.1: # %if.then
; LA32-NEXT:    move $a0, $fp
; LA32-NEXT:    b .LBB0_3
; LA32-NEXT:  .LBB0_2: # %if.else
; LA32-NEXT:    fcmp.cle.s $fcc0, $fs0, $fa0
; LA32-NEXT:    movcf2gr $a0, $fcc0
; LA32-NEXT:  .LBB0_3: # %if.then
; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_store_fcc_reg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -32
; LA64-NEXT:    .cfi_def_cfa_offset 32
; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    .cfi_offset 56, -24
; LA64-NEXT:    move $fp, $a0
; LA64-NEXT:    fmov.s $fs0, $fa0
; LA64-NEXT:    bl %plt(foo)
; LA64-NEXT:    movgr2fr.w $fa0, $zero
; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fs0
; LA64-NEXT:    bcnez $fcc0, .LBB0_2
; LA64-NEXT:  # %bb.1: # %if.then
; LA64-NEXT:    move $a0, $fp
; LA64-NEXT:    b .LBB0_3
; LA64-NEXT:  .LBB0_2: # %if.else
; LA64-NEXT:    fcmp.cle.s $fcc0, $fs0, $fa0
; LA64-NEXT:    movcf2gr $a0, $fcc0
; LA64-NEXT:  .LBB0_3: # %if.then
; LA64-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 32
; LA64-NEXT:    ret
  %cmp = fcmp ole float %a, 0.000000e+00
  call void @foo()
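  ;; %cmp is still needed after this call (it is both branched on and
  ;; returned on the if.else path), so the backend must preserve the
  ;; compare result across @foo.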
  br i1 %cmp, label %if.then, label %if.else

if.then:
  ret i1 %c

if.else:
  ret i1 %cmp
}