; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ALIGNED
; RUN: llc -mtriple=aarch64 -mattr=+strict-align < %s | FileCheck %s --check-prefixes=CHECK,CHECK-UNALIGNED

; Small (16 bytes here) unaligned memmove() should be a function call if
; strict-alignment is turned on.
define void @t16(ptr %out, ptr %in) {
; CHECK-ALIGNED-LABEL: t16:
; CHECK-ALIGNED:       // %bb.0: // %entry
; CHECK-ALIGNED-NEXT:    ldr q0, [x1]
; CHECK-ALIGNED-NEXT:    str q0, [x0]
; CHECK-ALIGNED-NEXT:    ret
;
; CHECK-UNALIGNED-LABEL: t16:
; CHECK-UNALIGNED:       // %bb.0: // %entry
; CHECK-UNALIGNED-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-UNALIGNED-NEXT:    .cfi_def_cfa_offset 16
; CHECK-UNALIGNED-NEXT:    .cfi_offset w30, -16
; CHECK-UNALIGNED-NEXT:    mov w2, #16 // =0x10
; CHECK-UNALIGNED-NEXT:    bl memmove
; CHECK-UNALIGNED-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-UNALIGNED-NEXT:    ret
entry:
  call void @llvm.memmove.p0.p0.i64(ptr %out, ptr %in, i64 16, i1 false)
  ret void
}

; Small (16 bytes here) aligned memmove() should be inlined even if
; strict-alignment is turned on.
define void @t16_aligned(ptr align 8 %out, ptr align 8 %in) {
; CHECK-ALIGNED-LABEL: t16_aligned:
; CHECK-ALIGNED:       // %bb.0: // %entry
; CHECK-ALIGNED-NEXT:    ldr q0, [x1]
; CHECK-ALIGNED-NEXT:    str q0, [x0]
; CHECK-ALIGNED-NEXT:    ret
;
; CHECK-UNALIGNED-LABEL: t16_aligned:
; CHECK-UNALIGNED:       // %bb.0: // %entry
; CHECK-UNALIGNED-NEXT:    ldp x9, x8, [x1]
; CHECK-UNALIGNED-NEXT:    stp x9, x8, [x0]
; CHECK-UNALIGNED-NEXT:    ret
entry:
  call void @llvm.memmove.p0.p0.i64(ptr align 8 %out, ptr align 8 %in, i64 16, i1 false)
  ret void
}

; Tiny (4 bytes here) unaligned memmove() should be inlined with byte sized
; loads and stores if strict-alignment is turned on.
define void @t4(ptr %out, ptr %in) {
; CHECK-ALIGNED-LABEL: t4:
; CHECK-ALIGNED:       // %bb.0: // %entry
; CHECK-ALIGNED-NEXT:    ldr w8, [x1]
; CHECK-ALIGNED-NEXT:    str w8, [x0]
; CHECK-ALIGNED-NEXT:    ret
;
; CHECK-UNALIGNED-LABEL: t4:
; CHECK-UNALIGNED:       // %bb.0: // %entry
; CHECK-UNALIGNED-NEXT:    ldrb w8, [x1, #3]
; CHECK-UNALIGNED-NEXT:    ldrb w9, [x1, #2]
; CHECK-UNALIGNED-NEXT:    ldrb w10, [x1]
; CHECK-UNALIGNED-NEXT:    ldrb w11, [x1, #1]
; CHECK-UNALIGNED-NEXT:    strb w8, [x0, #3]
; CHECK-UNALIGNED-NEXT:    strb w9, [x0, #2]
; CHECK-UNALIGNED-NEXT:    strb w11, [x0, #1]
; CHECK-UNALIGNED-NEXT:    strb w10, [x0]
; CHECK-UNALIGNED-NEXT:    ret
entry:
  call void @llvm.memmove.p0.p0.i64(ptr %out, ptr %in, i64 4, i1 false)
  ret void
}

; A 256-byte memmove with no alignment info is fully inlined (q-register
; ldp/stp pairs) when unaligned accesses are allowed, but becomes a library
; call under strict-alignment.
define void @t256(ptr %out, ptr %in) {
; CHECK-ALIGNED-LABEL: t256:
; CHECK-ALIGNED:       // %bb.0: // %entry
; CHECK-ALIGNED-NEXT:    ldp q0, q1, [x1]
; CHECK-ALIGNED-NEXT:    ldp q2, q3, [x1, #32]
; CHECK-ALIGNED-NEXT:    ldp q4, q5, [x1, #64]
; CHECK-ALIGNED-NEXT:    ldp q6, q7, [x1, #96]
; CHECK-ALIGNED-NEXT:    ldp q16, q17, [x1, #224]
; CHECK-ALIGNED-NEXT:    ldp q18, q19, [x1, #128]
; CHECK-ALIGNED-NEXT:    ldp q20, q21, [x1, #160]
; CHECK-ALIGNED-NEXT:    ldp q22, q23, [x1, #192]
; CHECK-ALIGNED-NEXT:    stp q0, q1, [x0]
; CHECK-ALIGNED-NEXT:    stp q2, q3, [x0, #32]
; CHECK-ALIGNED-NEXT:    stp q4, q5, [x0, #64]
; CHECK-ALIGNED-NEXT:    stp q6, q7, [x0, #96]
; CHECK-ALIGNED-NEXT:    stp q18, q19, [x0, #128]
; CHECK-ALIGNED-NEXT:    stp q20, q21, [x0, #160]
; CHECK-ALIGNED-NEXT:    stp q22, q23, [x0, #192]
; CHECK-ALIGNED-NEXT:    stp q16, q17, [x0, #224]
; CHECK-ALIGNED-NEXT:    ret
;
; CHECK-UNALIGNED-LABEL: t256:
; CHECK-UNALIGNED:       // %bb.0: // %entry
; CHECK-UNALIGNED-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-UNALIGNED-NEXT:    .cfi_def_cfa_offset 16
; CHECK-UNALIGNED-NEXT:    .cfi_offset w30, -16
; CHECK-UNALIGNED-NEXT:    mov w2, #256 // =0x100
; CHECK-UNALIGNED-NEXT:    bl memmove
; CHECK-UNALIGNED-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-UNALIGNED-NEXT:    ret
entry:
  call void @llvm.memmove.p0.p0.i64(ptr %out, ptr %in, i64 256, i1 false)
  ret void
}

; At 257 bytes the memmove exceeds the inline expansion limit, so both
; configurations emit a call to the memmove library function (shared CHECK
; prefix covers both RUN lines).
define void @t257(ptr %out, ptr %in) {
; CHECK-LABEL: t257:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w2, #257 // =0x101
; CHECK-NEXT:    bl memmove
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  call void @llvm.memmove.p0.p0.i64(ptr %out, ptr %in, i64 257, i1 false)
  ret void
}

declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)