xref: /llvm-project/llvm/test/Analysis/BasicAA/atomic-memory-intrinsics.ll (revision 303c308e452c703c3d47940383ded3b2d3eefd56)
1; RUN: opt -aa-pipeline=basic-aa -passes=aa-eval -print-all-alias-modref-info -disable-output 2>&1 %s | FileCheck %s
2
; Element-wise atomic memset. The intrinsic name suffix mangles the pointer
; type and the *length* type; the length operand here is i64, so the correct
; name is ...p0.i64 (as the FileCheck expectations below already assume),
; not the previously-declared ...p0.i32, which fails IR verification.
declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr, i8, i64, i32)

; Constant 4-byte memset of %a: bytes [0,4) are Mod, so the store at
; offset 5 is provably disjoint (NoModRef).
define void @test_memset_element_unordered_atomic_const_size(ptr noalias %a) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_const_size
; CHECK:       Just Mod:  Ptr: i8* %a	<->  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %a.gep.1	<->  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT:  NoModRef:  Ptr: i8* %a.gep.5	<->  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 4, i32 1)
;
entry:
  load i8, ptr %a
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 4, i32 1)
  %a.gep.1 = getelementptr i8, ptr %a, i32 1
  store i8 0, ptr %a.gep.1
  %a.gep.5 = getelementptr i8, ptr %a, i32 5
  store i8 1, ptr %a.gep.5
  ret void
}

; Variable-size memset of %a: with unknown %n, every offset into %a may be
; written, so all accesses report Just Mod.
define void @test_memset_element_unordered_atomic_variable_size(ptr noalias %a, i64 %n) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_variable_size
; CHECK:       Just Mod:  Ptr: i8* %a	<->  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %a.gep.1	<->  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %a.gep.5	<->  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 %n, i32 1)
;
entry:
  load i8, ptr %a
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 %n, i32 1)
  %a.gep.1 = getelementptr i8, ptr %a, i32 1
  store i8 0, ptr %a.gep.1
  %a.gep.5 = getelementptr i8, ptr %a, i32 5
  store i8 1, ptr %a.gep.5
  ret void
}
36
; Element-wise atomic memcpy: destination is writeonly, source is readonly.
declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
38
; Constant 4-byte memcpy from %a to %b (both noalias): the source side is
; only read (Just Ref), the destination side is only written (Just Mod),
; and accesses at offset 5 fall outside the 4-byte transfer (NoModRef).
define void @test_memcpy_element_unordered_atomic_const_size(ptr noalias %a, ptr noalias %b) {
; CHECK-LABEL: Function: test_memcpy_element_unordered_atomic_const_size
; CHECK:       Just Ref:  Ptr: i8* %a	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.1	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  NoModRef:  Ptr: i8* %a.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.1	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  NoModRef:  Ptr: i8* %b.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
;
entry:
  load i8, ptr %a
  load i8, ptr %b
  %a.gep.1 = getelementptr i8, ptr %a, i32 1
  store i8 0, ptr %a.gep.1
  %a.gep.5 = getelementptr i8, ptr %a, i32 5
  store i8 1, ptr %a.gep.5
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
  %b.gep.1 = getelementptr i8, ptr %b, i32 1
  store i8 0, ptr %b.gep.1
  %b.gep.5 = getelementptr i8, ptr %b, i32 5
  store i8 1, ptr %b.gep.5
  ret void
}
62
; Variable-size memcpy from %a to %b: with unknown length %n no offset can
; be excluded, so every %a access is Just Ref and every %b access Just Mod.
define void @test_memcpy_element_unordered_atomic_variable_size(ptr noalias %a, ptr noalias %b, i64 %n) {
; CHECK-LABEL: Function: test_memcpy_element_unordered_atomic_variable_size
; CHECK:       Just Ref:  Ptr: i8* %a	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.1	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.1	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
;
entry:
  load i8, ptr %a
  load i8, ptr %b
  %a.gep.1 = getelementptr i8, ptr %a, i32 1
  store i8 0, ptr %a.gep.1
  %a.gep.5 = getelementptr i8, ptr %a, i32 5
  store i8 1, ptr %a.gep.5
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
  %b.gep.1 = getelementptr i8, ptr %b, i32 1
  store i8 0, ptr %b.gep.1
  %b.gep.5 = getelementptr i8, ptr %b, i32 5
  store i8 1, ptr %b.gep.5
  ret void
}
86
; Element-wise atomic memmove: same writeonly-dest/readonly-src contract as memcpy.
declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
88
; Constant 4-byte memmove from %a to %b: expected ModRef results mirror the
; memcpy case — source Just Ref, destination Just Mod, offset-5 accesses
; disjoint from the 4-byte transfer (NoModRef).
define void @test_memmove_element_unordered_atomic_const_size(ptr noalias %a, ptr noalias %b) {
; CHECK-LABEL: Function: test_memmove_element_unordered_atomic_const_size
; CHECK:       Just Ref:  Ptr: i8* %a	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.1	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  NoModRef:  Ptr: i8* %a.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.1	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
; CHECK-NEXT:  NoModRef:  Ptr: i8* %b.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
;
entry:
  load i8, ptr %a
  load i8, ptr %b
  %a.gep.1 = getelementptr i8, ptr %a, i32 1
  store i8 0, ptr %a.gep.1
  %a.gep.5 = getelementptr i8, ptr %a, i32 5
  store i8 1, ptr %a.gep.5
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 4, i32 1)
  %b.gep.1 = getelementptr i8, ptr %b, i32 1
  store i8 0, ptr %b.gep.1
  %b.gep.5 = getelementptr i8, ptr %b, i32 5
  store i8 1, ptr %b.gep.5
  ret void
}
112
; Variable-size memmove from %a to %b: unknown length %n means no access
; can be proven disjoint — all %a accesses Just Ref, all %b accesses Just Mod.
define void @test_memmove_element_unordered_atomic_variable_size(ptr noalias %a, ptr noalias %b, i64 %n) {
; CHECK-LABEL: Function: test_memmove_element_unordered_atomic_variable_size
; CHECK:       Just Ref:  Ptr: i8* %a	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.1	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.1	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
;
entry:
  load i8, ptr %a
  load i8, ptr %b
  %a.gep.1 = getelementptr i8, ptr %a, i32 1
  store i8 0, ptr %a.gep.1
  %a.gep.5 = getelementptr i8, ptr %a, i32 5
  store i8 1, ptr %a.gep.5
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %b, ptr align 1 %a, i64 %n, i32 1)
  %b.gep.1 = getelementptr i8, ptr %b, i32 1
  store i8 0, ptr %b.gep.1
  %b.gep.5 = getelementptr i8, ptr %b, i32 5
  store i8 1, ptr %b.gep.5
  ret void
}
136