; xref: /llvm-project/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll (revision 4e49c9da14f75f47ca5cb0b4290d9c1c30d3d060)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=gvn < %s | FileCheck %s

; Two globals: @u is accessed only with volatile operations, @w only with
; atomic loads of varying ordering strength.
@u = global i32 5, align 4
@w = global i32 10, align 4
; The CHECK lines show GVN merging the two unordered atomic loads of @w
; across the volatile load of @u: %l2 is replaced by %l1, the sub folds to 0,
; and only the volatile load survives.
define i32 @test_load() {
; CHECK-LABEL: @test_load(
; CHECK-NEXT:    [[LV:%.*]] = load volatile i32, ptr @u, align 4
; CHECK-NEXT:    ret i32 [[LV]]
;
  %l1 = load atomic i32, ptr @w unordered, align 4
  %lv = load volatile i32, ptr @u, align 4
  %l2 = load atomic i32, ptr @w unordered, align 4
  %res.1 = sub i32 %l1, %l2
  %res = add i32 %res.1, %lv
  ret i32 %res
}
19
; With acquire ordering the CHECK lines show no merging: both atomic loads of
; @w and the intervening volatile load are preserved unchanged by GVN.
define i32 @test_load_with_acquire_load() {
; CHECK-LABEL: @test_load_with_acquire_load(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w acquire, align 4
; CHECK-NEXT:    [[LV:%.*]] = load volatile i32, ptr @u, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w acquire, align 4
; CHECK-NEXT:    [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w acquire, align 4
  %lv = load volatile i32, ptr @u, align 4
  %l2 = load atomic i32, ptr @w acquire, align 4
  %res.1 = sub i32 %l1, %l2
  %res = add i32 %res.1, %lv
  ret i32 %res
}
36
; Same shape as the acquire case but with seq_cst ordering; the CHECK lines
; show GVN leaves the whole sequence intact.
define i32 @test_load_with_seq_cst_load() {
; CHECK-LABEL: @test_load_with_seq_cst_load(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    [[LV:%.*]] = load volatile i32, ptr @u, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w seq_cst, align 4
  %lv = load volatile i32, ptr @u, align 4
  %l2 = load atomic i32, ptr @w seq_cst, align 4
  %res.1 = sub i32 %l1, %l2
  %res = add i32 %res.1, %lv
  ret i32 %res
}
53
; As in @test_load but with a volatile store in the middle: the CHECK lines
; show the unordered atomic loads of @w are still merged across it, the sub
; folds to 0, and only the volatile store remains.
define i32 @test_store(i32 %x) {
; CHECK-LABEL: @test_store(
; CHECK-NEXT:    store volatile i32 [[X:%.*]], ptr @u, align 4
; CHECK-NEXT:    ret i32 0
;
  %l1 = load atomic i32, ptr @w unordered, align 4
  store volatile i32 %x, ptr @u, align 4
  %l2 = load atomic i32, ptr @w unordered, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}
65
; Volatile store bracketed by acquire loads: the CHECK lines show GVN performs
; no merging here — both loads, the store, and the sub all survive.
define i32 @test_store_with_acquire_load(i32 %x) {
; CHECK-LABEL: @test_store_with_acquire_load(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w acquire, align 4
; CHECK-NEXT:    store volatile i32 [[X:%.*]], ptr @u, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w acquire, align 4
; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w acquire, align 4
  store volatile i32 %x, ptr @u, align 4
  %l2 = load atomic i32, ptr @w acquire, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}
80
; Same as the acquire-store case but with seq_cst loads; the CHECK lines show
; the whole sequence is preserved unchanged.
define i32 @test_store_with_seq_cst_load(i32 %x) {
; CHECK-LABEL: @test_store_with_seq_cst_load(
; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    store volatile i32 [[X:%.*]], ptr @u, align 4
; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %l1 = load atomic i32, ptr @w seq_cst, align 4
  store volatile i32 %x, ptr @u, align 4
  %l2 = load atomic i32, ptr @w seq_cst, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}