; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
;
; Ensures that ordered atomic loads count as MemoryDefs (unordered atomic
; loads remain MemoryUses, as @bar shows).

; CHECK-LABEL: define i32 @foo
define i32 @foo(ptr %a, ptr %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 4
  store i32 4, ptr %a, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, ptr %b acquire, align 4
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, ptr %a, align 4
  %3 = add i32 %1, %2
  ret i32 %3
}
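; Note: the acquire load above is a MemoryDef even though it writes nothing.
; MemorySSA conservatively models ordered loads as clobbers so that other
; accesses cannot be reordered across them, which is why the plain load of
; %a is MemoryUse(2) rather than a use of the store.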

; CHECK-LABEL: define void @bar
define void @bar(ptr %a) {
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load atomic i32, ptr %a unordered, align 4
  load atomic i32, ptr %a unordered, align 4
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: load atomic i32, ptr %a monotonic, align 4
  load atomic i32, ptr %a monotonic, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: load atomic i32, ptr %a acquire, align 4
  load atomic i32, ptr %a acquire, align 4
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic i32, ptr %a seq_cst, align 4
  load atomic i32, ptr %a seq_cst, align 4
  ret void
}
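; The ladder above shows the ordering cutoff: an unordered atomic load is a
; plain MemoryUse, while monotonic, acquire, and seq_cst loads each become a
; MemoryDef chained through the previous one.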

; CHECK-LABEL: define void @baz
define void @baz(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, ptr %a acquire, align 4
; CHECK: MemoryUse(1)
; CHECK-NEXT: %2 = load atomic i32, ptr %a unordered, align 4
  %2 = load atomic i32, ptr %a unordered, align 4
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %3 = load atomic i32, ptr %a monotonic, align 4
  %3 = load atomic i32, ptr %a monotonic, align 4
  ret void
}
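; Even the unordered load is pinned here: it stays a plain MemoryUse, but
; its defining access is the acquire load's MemoryDef (MemoryUse(1)), so it
; still cannot float above the acquire.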

; CHECK-LABEL: define void @fences
define void @fences(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: fence acquire
  fence acquire
; CHECK: MemoryUse(1)
; CHECK-NEXT: %1 = load i32, ptr %a
  %1 = load i32, ptr %a

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: fence release
  fence release
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32, ptr %a
  %2 = load i32, ptr %a

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: fence acq_rel
  fence acq_rel
; CHECK: MemoryUse(3)
; CHECK-NEXT: %3 = load i32, ptr %a
  %3 = load i32, ptr %a

; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: fence seq_cst
  fence seq_cst
; CHECK: MemoryUse(4)
; CHECK-NEXT: %4 = load i32, ptr %a
  %4 = load i32, ptr %a
  ret void
}
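; Fences neither read nor write memory, yet every flavor above is a
; MemoryDef, and each subsequent load uses the latest fence as its defining
; access, so loads cannot be reordered across any fence.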

; CHECK-LABEL: define void @seq_cst_clobber
define void @seq_cst_clobber(ptr noalias %a, ptr noalias %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = load atomic i32, ptr %a monotonic, align 4
  load atomic i32, ptr %a monotonic, align 4

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %2 = load atomic i32, ptr %a seq_cst, align 4
  load atomic i32, ptr %a seq_cst, align 4

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: load atomic i32, ptr %a monotonic, align 4
  load atomic i32, ptr %a monotonic, align 4

  ret void
}
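; Each ordered load above is a MemoryDef whose defining access is the
; previous one, so the seq_cst load "clobbers" the later monotonic load even
; though neither writes a byte. The chain is deliberately conservative.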

; Ensure that AA hands us MRI_Mod on unreorderable atomic ops.
;
; This test is a bit implementation-specific. In particular, it relies on
; MemorySSA passing cmpxchg-load queries through to AA instead of trying to
; reason about them on its own.
;
; If AA gets more aggressive, we can find another way.
;
; CHECK-LABEL: define void @check_aa_is_sane
define void @check_aa_is_sane(ptr noalias %a, ptr noalias %b) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: cmpxchg ptr %a, i32 0, i32 1 acquire acquire
  cmpxchg ptr %a, i32 0, i32 1 acquire acquire
; CHECK: MemoryUse(1)
; CHECK-NEXT: load i32, ptr %b, align 4
  load i32, ptr %b, align 4

  ret void
}
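
; Editor's sketch (not part of the original test): by the same logic, a
; read-modify-write like atomicrmw should be a MemoryDef, and a later load
; of the same pointer a MemoryUse of it. The CHECK lines below are an
; inferred expectation, not verified printer output.
; CHECK-LABEL: define void @rmw_sketch
define void @rmw_sketch(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %1 = atomicrmw add ptr %a, i32 1 monotonic
  %1 = atomicrmw add ptr %a, i32 1 monotonic
; CHECK: MemoryUse(1)
; CHECK-NEXT: %2 = load i32, ptr %a
  %2 = load i32, ptr %a
  ret void
}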