; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=gvn < %s | FileCheck %s

@u = global i32 5, align 4
@w = global i32 10, align 4

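; A seq_cst load of @u has acquire semantics: it may synchronize with a
; release in another thread, after which @w may hold a new value, so GVN
; keeps both unordered loads of @w instead of merging them.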
define i32 @test_load_seq_cst_unordered() {
; CHECK-LABEL: @test_load_seq_cst_unordered(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w unordered, align 4
; CHECK-NEXT:    [[LV:%.*]] = load atomic i32, ptr @u seq_cst, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w unordered, align 4
; CHECK-NEXT:    [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w unordered, align 4
  %lv = load atomic i32, ptr @u seq_cst, align 4
  %l2 = load atomic i32, ptr @w unordered, align 4
  %res.1 = sub i32 %l1, %l2
  %res = add i32 %res.1, %lv
  ret i32 %res
}

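; The same holds across a plain acquire load of @u: the two unordered
; loads of @w may legitimately observe different values, so neither is
; removed.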
define i32 @test_load_acquire_unordered() {
; CHECK-LABEL: @test_load_acquire_unordered(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w unordered, align 4
; CHECK-NEXT:    [[LV:%.*]] = load atomic i32, ptr @u acquire, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w unordered, align 4
; CHECK-NEXT:    [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w unordered, align 4
  %lv = load atomic i32, ptr @u acquire, align 4
  %l2 = load atomic i32, ptr @w unordered, align 4
  %res.1 = sub i32 %l1, %l2
  %res = add i32 %res.1, %lv
  ret i32 %res
}

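; A store never has acquire semantics, so the unordered loads of @w may be
; merged across the seq_cst store of @u: %l1 is forwarded to %l2 and the
; result folds to 0.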
define i32 @test_store_cst_unordered(i32 %x) {
; CHECK-LABEL: @test_store_cst_unordered(
; CHECK-NEXT:    store atomic i32 [[X:%.*]], ptr @u seq_cst, align 4
; CHECK-NEXT:    ret i32 0
;
  %l1 = load atomic i32, ptr @w unordered, align 4
  store atomic i32 %x, ptr @u seq_cst, align 4
  %l2 = load atomic i32, ptr @w unordered, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}

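; Likewise across a release store, which only orders the operations that
; precede it; hoisting the second load of @w above the store is legal.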
define i32 @test_store_release_unordered(i32 %x) {
; CHECK-LABEL: @test_store_release_unordered(
; CHECK-NEXT:    store atomic i32 [[X:%.*]], ptr @u release, align 4
; CHECK-NEXT:    ret i32 0
;
  %l1 = load atomic i32, ptr @w unordered, align 4
  store atomic i32 %x, ptr @u release, align 4
  %l2 = load atomic i32, ptr @w unordered, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}

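; The first unordered store to @w is not dead: the seq_cst store of @u
; releases it, so a thread that acquires @u may rely on seeing %x in @w.
; All three stores must remain.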
define i32 @test_stores_seq_cst_unordered(i32 %x) {
; CHECK-LABEL: @test_stores_seq_cst_unordered(
; CHECK-NEXT:    store atomic i32 [[X:%.*]], ptr @w unordered, align 4
; CHECK-NEXT:    store atomic i32 [[X]], ptr @u seq_cst, align 4
; CHECK-NEXT:    store atomic i32 0, ptr @w unordered, align 4
; CHECK-NEXT:    ret i32 0
;
  store atomic i32 %x, ptr @w unordered, align 4
  store atomic i32 %x, ptr @u seq_cst, align 4
  store atomic i32 0, ptr @w unordered, align 4
  ret i32 0
}

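; The same applies with a release store of @u: the earlier store to @w
; stays ordered before it and cannot be eliminated.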
define i32 @test_stores_release_unordered(i32 %x) {
; CHECK-LABEL: @test_stores_release_unordered(
; CHECK-NEXT:    store atomic i32 [[X:%.*]], ptr @w unordered, align 4
; CHECK-NEXT:    store atomic i32 [[X]], ptr @u release, align 4
; CHECK-NEXT:    store atomic i32 0, ptr @w unordered, align 4
; CHECK-NEXT:    ret i32 0
;
  store atomic i32 %x, ptr @w unordered, align 4
  store atomic i32 %x, ptr @u release, align 4
  store atomic i32 0, ptr @w unordered, align 4
  ret i32 0
}


; Negative tests: seq_cst operations participate in a single total order,
; so they must not be reordered with one another even when they access
; unrelated addresses.
define i32 @neg_load_seq_cst() {
; CHECK-LABEL: @neg_load_seq_cst(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    [[LV:%.*]] = load atomic i32, ptr @u seq_cst, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w seq_cst, align 4
  %lv = load atomic i32, ptr @u seq_cst, align 4
  %l2 = load atomic i32, ptr @w seq_cst, align 4
  %res.1 = sub i32 %l1, %l2
  %res = add i32 %res.1, %lv
  ret i32 %res
}

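; Here the loads of @w are themselves seq_cst, so they may not move across
; the seq_cst store of @u and no forwarding happens.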
define i32 @neg_store_seq_cst(i32 %x) {
; CHECK-LABEL: @neg_store_seq_cst(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    store atomic i32 [[X:%.*]], ptr @u seq_cst, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w seq_cst, align 4
  store atomic i32 %x, ptr @u seq_cst, align 4
  %l2 = load atomic i32, ptr @w seq_cst, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}

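; seq_cst stores to two unrelated addresses must also keep their relative
; order; none of the three stores can be removed.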
define i32 @neg_stores_seq_cst(i32 %x) {
; CHECK-LABEL: @neg_stores_seq_cst(
; CHECK-NEXT:    store atomic i32 [[X:%.*]], ptr @w seq_cst, align 4
; CHECK-NEXT:    store atomic i32 [[X]], ptr @u seq_cst, align 4
; CHECK-NEXT:    store atomic i32 0, ptr @w seq_cst, align 4
; CHECK-NEXT:    ret i32 0
;
  store atomic i32 %x, ptr @w seq_cst, align 4
  store atomic i32 %x, ptr @u seq_cst, align 4
  store atomic i32 0, ptr @w seq_cst, align 4
  ret i32 0
}