xref: /llvm-project/llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll (revision 9d4f7f44b64d87d1068859906f43b7ce03a7388b)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc --mtriple=loongarch32 -mattr=+d --verify-machineinstrs < %s \
3; RUN:   | FileCheck %s --check-prefix=LA32
4; RUN: llc --mtriple=loongarch64 -mattr=+d --verify-machineinstrs < %s \
5; RUN:   | FileCheck %s --check-prefix=LA64
6
7declare void @callee(ptr, ptr)
8
; This function needs both stack realignment (the 64-byte-aligned local
; below) and a variable-sized object (the VLA-style alloca), so the
; generated code must set up a frame pointer ($fp) AND a base pointer
; ($s8): $sp is realigned with bstrins, then $s8 snapshots the realigned
; $sp so the fixed-offset object stays addressable after the dynamic
; alloca moves $sp. The CHECK bodies below are autogenerated — do not
; hand-edit them; regenerate with update_llc_test_checks.py.
define void @caller(i32 %n) {
; LA32-LABEL: caller:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -64
; LA32-NEXT:    .cfi_def_cfa_offset 64
; LA32-NEXT:    st.w $ra, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s8, $sp, 52 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    .cfi_offset 31, -12
; LA32-NEXT:    addi.w $fp, $sp, 64
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 5, 0
; LA32-NEXT:    move $s8, $sp
; LA32-NEXT:    addi.w $a0, $a0, 15
; LA32-NEXT:    bstrins.w $a0, $zero, 3, 0
; LA32-NEXT:    sub.w $a0, $sp, $a0
; LA32-NEXT:    move $sp, $a0
; LA32-NEXT:    addi.w $a1, $s8, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -64
; LA32-NEXT:    ld.w $s8, $sp, 52 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 64
; LA32-NEXT:    ret
;
; LA64-LABEL: caller:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -64
; LA64-NEXT:    .cfi_def_cfa_offset 64
; LA64-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s8, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    .cfi_offset 31, -24
; LA64-NEXT:    addi.d $fp, $sp, 64
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 5, 0
; LA64-NEXT:    move $s8, $sp
; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64-NEXT:    addi.d $a0, $a0, 15
; LA64-NEXT:    bstrpick.d $a0, $a0, 32, 4
; LA64-NEXT:    slli.d $a0, $a0, 4
; LA64-NEXT:    sub.d $a0, $sp, $a0
; LA64-NEXT:    move $sp, $a0
; LA64-NEXT:    addi.d $a1, $s8, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -64
; LA64-NEXT:    ld.d $s8, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 64
; LA64-NEXT:    ret
  ; Variable-sized object: %n bytes, size unknown at compile time.
  ; This is what forces the dynamic $sp adjustment (and hence $s8).
  %1 = alloca i8, i32 %n
  ; Over-aligned (64-byte) fixed object: alignment exceeds the default
  ; stack alignment, forcing the bstrins realignment of $sp.
  %2 = alloca i32, align 64
  ; Both pointers escape to the callee so neither alloca can be elided.
  call void @callee(ptr %1, ptr %2)
  ret void
}
70