; xref: /llvm-project/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll (revision 35e73e7cc8c4078ea25151da3958a114591bf700)
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=x86_64-linux-gnu -stop-after=finalize-isel -o - %s | FileCheck --check-prefix=MIR %s

; Ensure that the scoped AA is attached on loads/stores lowered from mem ops.

; Re-evaluate the slot numbers of scopes as that numbering could be changed run-by-run.

; MIR-DAG: ![[DOMAIN:[0-9]+]] = distinct !{!{{[0-9]+}}, !"bax"}
; MIR-DAG: ![[SCOPE0:[0-9]+]] = distinct !{!{{[0-9]+}}, ![[DOMAIN]], !"bax: %p"}
; MIR-DAG: ![[SCOPE1:[0-9]+]] = distinct !{!{{[0-9]+}}, ![[DOMAIN]], !"bax: %q"}
; MIR-DAG: ![[SET0:[0-9]+]] = !{![[SCOPE0]]}
; MIR-DAG: ![[SET1:[0-9]+]] = !{![[SCOPE1]]}

; llvm.memcpy: the !alias.scope/!noalias on the call must survive onto the
; MachineMemOperands of the lowered MOV64rm/MOV64mr pair.
define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
  ; MIR-LABEL: name: test_memcpy
  ; MIR: bb.0 (%ir-block.0):
  ; MIR-NEXT:   liveins: $rdi, $rsi
  ; MIR-NEXT: {{  $}}
  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rsi
  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; MIR-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   $eax = COPY [[ADD32rm]]
  ; MIR-NEXT:   RET 0, $eax
  %p0 = bitcast ptr %p to ptr
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
  %p1 = bitcast ptr %add.ptr to ptr
  tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, ptr %q, i64 1
  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}
; llvm.memcpy.inline: expect the same scoped-AA propagation as plain memcpy.
define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
  ; MIR-LABEL: name: test_memcpy_inline
  ; MIR: bb.0 (%ir-block.0):
  ; MIR-NEXT:   liveins: $rdi, $rsi
  ; MIR-NEXT: {{  $}}
  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rsi
  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; MIR-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   $eax = COPY [[ADD32rm]]
  ; MIR-NEXT:   RET 0, $eax
  %p0 = bitcast ptr %p to ptr
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
  %p1 = bitcast ptr %add.ptr to ptr
  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, ptr %q, i64 1
  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}
; llvm.memmove: scoped-AA metadata must be attached; note the stores are
; emitted in forward order (p0, then p0 + 8), unlike the memcpy lowering.
define i32 @test_memmove(ptr nocapture %p, ptr nocapture readonly %q) {
  ; MIR-LABEL: name: test_memmove
  ; MIR: bb.0 (%ir-block.0):
  ; MIR-NEXT:   liveins: $rdi, $rsi
  ; MIR-NEXT: {{  $}}
  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rsi
  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; MIR-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   $eax = COPY [[ADD32rm]]
  ; MIR-NEXT:   RET 0, $eax
  %p0 = bitcast ptr %p to ptr
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
  %p1 = bitcast ptr %add.ptr to ptr
  tail call void @llvm.memmove.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, ptr %q, i64 1
  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}
; llvm.memset (pattern 0xAA -> 0xAAAA... = -6148914691236517206): the lowered
; stores must carry the call's !alias.scope/!noalias metadata.
define i32 @test_memset(ptr nocapture %p, ptr nocapture readonly %q) {
  ; MIR-LABEL: name: test_memset
  ; MIR: bb.0 (%ir-block.0):
  ; MIR-NEXT:   liveins: $rdi, $rsi
  ; MIR-NEXT: {{  $}}
  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rsi
  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; MIR-NEXT:   [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri -6148914691236517206
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, [[MOV64ri]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[MOV64ri]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   $eax = COPY [[ADD32rm]]
  ; MIR-NEXT:   RET 0, $eax
  %p0 = bitcast ptr %p to ptr
  tail call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, ptr %q, i64 1
  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}
; Library call @mempcpy (recognized and lowered like memcpy, but with align 1
; mem operands since only the call-site attributes carry alignment here):
; scoped-AA metadata must still be attached.
define i32 @test_mempcpy(ptr nocapture %p, ptr nocapture readonly %q) {
  ; MIR-LABEL: name: test_mempcpy
  ; MIR: bb.0 (%ir-block.0):
  ; MIR-NEXT:   liveins: $rdi, $rsi
  ; MIR-NEXT: {{  $}}
  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rsi
  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; MIR-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 1, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 1, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 1, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 1, !alias.scope !0, !noalias !3)
  ; MIR-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
  ; MIR-NEXT:   $eax = COPY [[ADD32rm]]
  ; MIR-NEXT:   RET 0, $eax
  %p0 = bitcast ptr %p to ptr
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
  %p1 = bitcast ptr %add.ptr to ptr
  %call = tail call ptr @mempcpy(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !2, !noalias !4
  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, ptr %q, i64 1
  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)

declare ptr @mempcpy(ptr, ptr, i64)
; Scoped-AA metadata: !0 is the "bax" domain; !1/!3 are the per-pointer scopes
; for %p and %q; !2/!4 are the single-element scope lists used on the accesses.
!0 = distinct !{!0, !"bax"}
!1 = distinct !{!1, !0, !"bax: %p"}
!2 = !{!1}
!3 = distinct !{!3, !0, !"bax: %q"}
!4 = !{!3}