xref: /llvm-project/llvm/test/CodeGen/AArch64/reserveXreg.ll (revision 5ddce70ef0e5a641d7fea95e31fc5e2439cb98cb)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=LR,FP,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
3; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=X30,X29,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
4
5; The aliases LR/X30 and FP/X29 should each be recognized by
5; -reserve-regs-for-regalloc, so both RUN lines reserve the same set
5; and neither register is ever used.
6
7define void @foo(i64 %v1, i64 %v2, ptr %ptr) {
8; CHECK-LABEL: foo:
9; CHECK:       // %bb.0:
10; CHECK-NEXT:    sub sp, sp, #16
11; CHECK-NEXT:    .cfi_def_cfa_offset 16
12; CHECK-NEXT:    add x3, x0, x1
13; CHECK-NEXT:    str x3, [sp, #8] // 8-byte Folded Spill
14; CHECK-NEXT:    str x3, [x2, #8]
15; CHECK-NEXT:    ldr x3, [x2, #16]
16; CHECK-NEXT:    add x3, x0, x3
17; CHECK-NEXT:    sub x3, x3, x1
18; CHECK-NEXT:    str x3, [x2, #16]
19; CHECK-NEXT:    ldr x3, [sp, #8] // 8-byte Folded Reload
20; CHECK-NEXT:    str x3, [x2, #24]
21; CHECK-NEXT:    str x0, [x2, #32]
22; CHECK-NEXT:    str x1, [x2, #40]
23; CHECK-NEXT:    add sp, sp, #16
24; CHECK-NEXT:    ret
; Both RUN lines reserve every GPR except x0-x3: x0/x1 carry %v1/%v2,
; x2 carries %ptr, leaving x3 as the only allocatable scratch register.
; %v3 is live across the %v4..%v6 computation, which also needs x3, so
; the allocator is forced to spill %v3 to the stack and reload it for
; the third store (the "Folded Spill"/"Folded Reload" lines above).
25  %v3 = add i64 %v1, %v2
26  %p1 = getelementptr i64, ptr %ptr, i64 1
; All stores are volatile so none of them can be removed or reordered,
; pinning the register pressure this test relies on.
27  store volatile i64 %v3, ptr %p1, align 8
28
29  %p2 = getelementptr i64, ptr %ptr, i64 2
30  %v4 = load volatile i64, ptr %p2, align 8
31  %v5 = add i64 %v1, %v4
32  %v6 = sub i64 %v5, %v2
33  store volatile i64 %v6, ptr %p2, align 8
34
; This use of %v3 after x3 was reused for %v4..%v6 triggers the reload.
35  %p3 = getelementptr i64, ptr %ptr, i64 3
36  store volatile i64 %v3, ptr %p3, align 8
37
; Final uses of the incoming arguments confirm x0/x1 stayed live and
; untouched throughout (no reserved register was allocated).
38  %p4 = getelementptr i64, ptr %ptr, i64 4
39  store volatile i64 %v1, ptr %p4, align 8
40  %p5 = getelementptr i64, ptr %ptr, i64 5
41  store volatile i64 %v2, ptr %p5, align 8
42  ret void
43}
44