; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA64

declare void @callee(i8*, i32*)

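; Check stack realignment in a function with both a variable-sized object and
; an object whose alignment (64) exceeds the stack alignment: the frame
; pointer ($fp) is set up so the epilogue can restore $sp, $sp is aligned
; down to a 64-byte boundary (srli/slli by 6 clears the low 6 bits), and the
; base pointer ($s8) is used to address the fixed-size stack objects.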
define void @caller(i32 %n) {
; LA32-LABEL: caller:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -64
; LA32-NEXT:    .cfi_def_cfa_offset 64
; LA32-NEXT:    st.w $ra, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s8, $sp, 52 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    .cfi_offset 31, -12
; LA32-NEXT:    addi.w $fp, $sp, 64
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    srli.w $a1, $sp, 6
; LA32-NEXT:    slli.w $sp, $a1, 6
; LA32-NEXT:    move $s8, $sp
; LA32-NEXT:    addi.w $a0, $a0, 15
; LA32-NEXT:    addi.w $a1, $zero, -16
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    sub.w $a0, $sp, $a0
; LA32-NEXT:    move $sp, $a0
; LA32-NEXT:    addi.w $a1, $s8, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -64
; LA32-NEXT:    ld.w $s8, $sp, 52 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 64
; LA32-NEXT:    ret
;
; LA64-LABEL: caller:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -64
; LA64-NEXT:    .cfi_def_cfa_offset 64
; LA64-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s8, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    .cfi_offset 31, -24
; LA64-NEXT:    addi.d $fp, $sp, 64
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    srli.d $a1, $sp, 6
; LA64-NEXT:    slli.d $sp, $a1, 6
; LA64-NEXT:    move $s8, $sp
; LA64-NEXT:    addi.w $a1, $zero, -16
; LA64-NEXT:    lu32i.d $a1, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64-NEXT:    addi.d $a0, $a0, 15
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    sub.d $a0, $sp, $a0
; LA64-NEXT:    move $sp, $a0
; LA64-NEXT:    addi.d $a1, $s8, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -64
; LA64-NEXT:    ld.d $s8, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 64
; LA64-NEXT:    ret
  %1 = alloca i8, i32 %n
  %2 = alloca i32, align 64
  call void @callee(i8* %1, i32* %2)
  ret void
}