; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64 %s
; RUN: llc -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64-LINUX %s

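; Check lowering of the llvm.init.trampoline and llvm.adjust.trampoline
; intrinsics on RV64: the 32-byte trampoline is materialized on the stack,
; the instruction cache is flushed via __clear_cache (generic riscv64) or
; __riscv_flush_icache (riscv64-linux), and the trampoline is then called
; indirectly through jalr.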
declare void @llvm.init.trampoline(ptr, ptr, ptr)
declare ptr @llvm.adjust.trampoline(ptr)
declare i64 @f(ptr nest, i64)

define i64 @test0(i64 %n, ptr %p) nounwind {
; RV64-LABEL: test0:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -64
; RV64-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; RV64-NEXT:    mv s0, a0
; RV64-NEXT:    lui a0, %hi(f)
; RV64-NEXT:    addi a0, a0, %lo(f)
; RV64-NEXT:    li a2, 919
; RV64-NEXT:    lui a3, %hi(.LCPI0_0)
; RV64-NEXT:    sd a0, 32(sp)
; RV64-NEXT:    lui a0, 6203
; RV64-NEXT:    ld a3, %lo(.LCPI0_0)(a3)
; RV64-NEXT:    addi a0, a0, 643
; RV64-NEXT:    sw a2, 8(sp)
; RV64-NEXT:    sw a0, 12(sp)
; RV64-NEXT:    sd a3, 16(sp)
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    addi a1, sp, 24
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    addi s1, sp, 8
; RV64-NEXT:    call __clear_cache
; RV64-NEXT:    mv a0, s0
; RV64-NEXT:    jalr s1
; RV64-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 64
; RV64-NEXT:    ret
;
; RV64-LINUX-LABEL: test0:
; RV64-LINUX:       # %bb.0:
; RV64-LINUX-NEXT:    addi sp, sp, -64
; RV64-LINUX-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT:    mv s0, a0
; RV64-LINUX-NEXT:    lui a0, %hi(f)
; RV64-LINUX-NEXT:    addi a0, a0, %lo(f)
; RV64-LINUX-NEXT:    li a2, 919
; RV64-LINUX-NEXT:    lui a3, %hi(.LCPI0_0)
; RV64-LINUX-NEXT:    sd a0, 32(sp)
; RV64-LINUX-NEXT:    lui a0, 6203
; RV64-LINUX-NEXT:    ld a3, %lo(.LCPI0_0)(a3)
; RV64-LINUX-NEXT:    addi a0, a0, 643
; RV64-LINUX-NEXT:    sw a2, 8(sp)
; RV64-LINUX-NEXT:    sw a0, 12(sp)
; RV64-LINUX-NEXT:    sd a3, 16(sp)
; RV64-LINUX-NEXT:    sd a1, 24(sp)
; RV64-LINUX-NEXT:    addi a1, sp, 24
; RV64-LINUX-NEXT:    addi a0, sp, 8
; RV64-LINUX-NEXT:    addi s1, sp, 8
; RV64-LINUX-NEXT:    li a2, 0
; RV64-LINUX-NEXT:    call __riscv_flush_icache
; RV64-LINUX-NEXT:    mv a0, s0
; RV64-LINUX-NEXT:    jalr s1
; RV64-LINUX-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64-LINUX-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; RV64-LINUX-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; RV64-LINUX-NEXT:    addi sp, sp, 64
; RV64-LINUX-NEXT:    ret
  %alloca = alloca [32 x i8], align 8
  call void @llvm.init.trampoline(ptr %alloca, ptr @f, ptr %p)
  %tramp = call ptr @llvm.adjust.trampoline(ptr %alloca)
  %ret = call i64 %tramp(i64 %n)
  ret i64 %ret

}