; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel -global-isel-abort=1 -verify-machineinstrs -stop-after=irtranslator %s -o - | FileCheck %s

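; A short note on what these tests exercise, inferred from the CHECK lines
; below: @llvm.memcpy is translated to G_MEMCPY and @llvm.memcpy.inline to
; G_MEMCPY_INLINE. The trailing immediate on G_MEMCPY is the tail-call flag
; (0 for a plain call, 1 for a tail call); G_MEMCPY_INLINE carries no such
; flag, since it must always be expanded inline rather than lowered to a
; libcall. Volatile copies are reflected in the memory operands as
; "volatile store" / "volatile load" rather than as an instruction operand.
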
define void @copy(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: copy
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
  ret void
}

define void @inline_copy(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: inline_copy
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
  ret void
}

define void @copy_volatile(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: copy_volatile
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
  ret void
}

define void @inline_copy_volatile(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: inline_copy_volatile
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
  ret void
}

define void @tail_copy(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: tail_copy
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  tail call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
  ret void
}

define void @tail_inline_copy(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: tail_inline_copy
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  tail call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
  ret void
}

define void @tail_copy_volatile(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: tail_copy_volatile
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  tail call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
  ret void
}

define void @tail_inline_copy_volatile(ptr %dst, ptr %src) {
  ; CHECK-LABEL: name: tail_inline_copy_volatile
  ; CHECK: bb.1.entry:
  ; CHECK:   liveins: $x0, $x1
  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
  ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
  ; CHECK:   RET_ReallyLR
entry:
  tail call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
  ret void
}

declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) nounwind
declare void @llvm.memcpy.inline.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) nounwind