; xref: /llvm-project/llvm/test/CodeGen/LoongArch/stack-realignment-with-variable-sized-objects.ll (revision 1bb7766489803bb5cc4752ecade1164b31b758b5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA64

; External callee taking the two alloca pointers; only the declaration is
; needed — the test exercises the caller's stack realignment, not the callee.
declare void @callee(ptr, ptr)

; Checks frame lowering when a function needs both stack realignment (the
; `align 64` alloca exceeds the default stack alignment) and a variable-sized
; object (`alloca i8, i32 %n`): $fp anchors the fixed frame, $sp is realigned
; with bstrins, and $s8 is set up as the base pointer for locals since $sp
; moves dynamically. CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
define void @caller(i32 %n) {
; LA32-LABEL: caller:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -64
; LA32-NEXT:    .cfi_def_cfa_offset 64
; LA32-NEXT:    st.w $ra, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s8, $sp, 52 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    .cfi_offset 31, -12
; LA32-NEXT:    addi.w $fp, $sp, 64
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 5, 0
; LA32-NEXT:    move $s8, $sp
; LA32-NEXT:    addi.w $a0, $a0, 15
; LA32-NEXT:    addi.w $a1, $zero, -16
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    sub.w $a0, $sp, $a0
; LA32-NEXT:    move $sp, $a0
; LA32-NEXT:    addi.w $a1, $s8, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -64
; LA32-NEXT:    ld.w $s8, $sp, 52 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 64
; LA32-NEXT:    ret
;
; LA64-LABEL: caller:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -64
; LA64-NEXT:    .cfi_def_cfa_offset 64
; LA64-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s8, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    .cfi_offset 31, -24
; LA64-NEXT:    addi.d $fp, $sp, 64
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 5, 0
; LA64-NEXT:    move $s8, $sp
; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64-NEXT:    addi.d $a0, $a0, 15
; LA64-NEXT:    bstrpick.d $a0, $a0, 32, 4
; LA64-NEXT:    slli.d $a0, $a0, 4
; LA64-NEXT:    sub.d $a0, $sp, $a0
; LA64-NEXT:    move $sp, $a0
; LA64-NEXT:    addi.d $a1, $s8, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -64
; LA64-NEXT:    ld.d $s8, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 64
; LA64-NEXT:    ret
  %1 = alloca i8, i32 %n
  %2 = alloca i32, align 64
  call void @callee(ptr %1, ptr %2)
  ret void
}