; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d --loongarch-numeric-reg < %s \
; RUN:   | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d --loongarch-numeric-reg < %s \
; RUN:   | FileCheck %s --check-prefix=LA64
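
;; Check that with --loongarch-numeric-reg the emitted assembly refers to
;; registers by their numeric names (e.g. $r3, $r4) rather than ABI names.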

@.str_1 = internal constant [7 x i8] c"hello\0A\00"

declare i32 @printf(ptr, ...)

define i32 @main() {
; LA32-LABEL: main:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $r3, $r3, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $r1, $r3, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    pcalau12i $r4, %pc_hi20(.str_1)
; LA32-NEXT:    addi.w $r4, $r4, %pc_lo12(.str_1)
; LA32-NEXT:    bl %plt(printf)
; LA32-NEXT:    move $r4, $r0
; LA32-NEXT:    ld.w $r1, $r3, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $r3, $r3, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: main:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $r3, $r3, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $r1, $r3, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    pcalau12i $r4, %pc_hi20(.str_1)
; LA64-NEXT:    addi.d $r4, $r4, %pc_lo12(.str_1)
; LA64-NEXT:    bl %plt(printf)
; LA64-NEXT:    move $r4, $r0
; LA64-NEXT:    ld.d $r1, $r3, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $r3, $r3, 16
; LA64-NEXT:    ret
  %s = getelementptr [7 x i8], ptr @.str_1, i64 0, i64 0
  call i32 (ptr, ...) @printf(ptr %s)
  ret i32 0
}