//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
#define TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H

#include <atomic>
#include <cassert>
#include <cmath>
#include <type_traits>
#include <vector>

#include "test_macros.h"

#ifndef TEST_HAS_NO_THREADS
# include "make_test_thread.h"
# include <thread>
#endif
template <class T>
bool equals(T x, T y) {
  return x == y;
}

template <class T>
T make_value(int i) {
  assert(i == 0 || i == 1);
  if constexpr (std::is_pointer_v<T>) {
    // So that pointers returned can be subtracted from one another
    static std::remove_const_t<std::remove_pointer_t<T>> d[2];
    return &d[i];
  } else {
    return T(i);
  }
}

// Test that all threads see the exact same order of the stores to x and y
// (independent reads of independent writes). The test always passes if
// store_op and load_op access the memory with seq_cst ordering.
template <class T, class StoreOp, class LoadOp>
void test_seq_cst(StoreOp store_op, LoadOp load_op) {
#ifndef TEST_HAS_NO_THREADS
  for (int i = 0; i < 100; ++i) {
    T old_value(make_value<T>(0));
    T new_value(make_value<T>(1));

    T copy_x = old_value;
    std::atomic_ref<T> const x(copy_x);
    T copy_y = old_value;
    std::atomic_ref<T> const y(copy_y);

    std::atomic_bool x_updated_first(false);
    std::atomic_bool y_updated_first(false);

    auto t1 = support::make_test_thread([&] { store_op(x, old_value, new_value); });

    auto t2 = support::make_test_thread([&] { store_op(y, old_value, new_value); });

    auto t3 = support::make_test_thread([&] {
      while (!equals(load_op(x), new_value)) {
        std::this_thread::yield();
      }
      if (!equals(load_op(y), new_value)) {
        x_updated_first.store(true, std::memory_order_relaxed);
      }
    });

    auto t4 = support::make_test_thread([&] {
      while (!equals(load_op(y), new_value)) {
        std::this_thread::yield();
      }
      if (!equals(load_op(x), new_value)) {
        y_updated_first.store(true, std::memory_order_relaxed);
      }
    });

    t1.join();
    t2.join();
    t3.join();
    t4.join();
    // thread 3 and thread 4 cannot see different orders of storing x and y
    assert(!(x_updated_first && y_updated_first));
  }
#else
  (void)store_op;
  (void)load_op;
#endif
}
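
// Illustrative sketch (not part of the original helpers): one way a test could
// drive test_seq_cst, passing plain seq_cst store/load operations on a
// std::atomic_ref. The lambda signatures mirror how store_op/load_op are
// invoked above; the function name is hypothetical.
template <class T>
void example_test_seq_cst_with_plain_ops() {
  auto store = [](std::atomic_ref<T> const& ref, T /*old_val*/, T new_val) {
    ref.store(new_val, std::memory_order_seq_cst);
  };
  auto load = [](std::atomic_ref<T> const& ref) { return ref.load(std::memory_order_seq_cst); };
  test_seq_cst<T>(store, load);
}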

// Test that all writes made before the store are visible to other threads
// after they observe the stored value via the load. The test always passes if
// store_op and load_op access the memory with acquire-release (or stronger)
// ordering.
template <class T, class StoreOp, class LoadOp>
void test_acquire_release(StoreOp store_op, LoadOp load_op) {
#ifndef TEST_HAS_NO_THREADS
  for (auto i = 0; i < 100; ++i) {
    T old_value(make_value<T>(0));
    T new_value(make_value<T>(1));

    T copy = old_value;
    std::atomic_ref<T> const at(copy);
    int non_atomic = 5;

    constexpr auto number_of_threads = 8;
    std::vector<std::thread> threads;
    threads.reserve(number_of_threads);

    for (auto j = 0; j < number_of_threads; ++j) {
      threads.push_back(support::make_test_thread([&at, &non_atomic, load_op, new_value] {
        while (!equals(load_op(at), new_value)) {
          std::this_thread::yield();
        }
        // Other thread's writes before the release store are visible
        // in this thread's read after the acquire load
        assert(non_atomic == 6);
      }));
    }

    non_atomic = 6;
    store_op(at, old_value, new_value);

    for (auto& thread : threads) {
      thread.join();
    }
  }
#else
  (void)store_op;
  (void)load_op;
#endif
}
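
// Illustrative sketch (hypothetical, mirroring the seq_cst example above): a
// test could exercise test_acquire_release with an explicit release store and
// acquire load, the minimum ordering the helper is designed to check.
template <class T>
void example_test_acquire_release_with_plain_ops() {
  auto store = [](std::atomic_ref<T> const& ref, T /*old_val*/, T new_val) {
    ref.store(new_val, std::memory_order_release);
  };
  auto load = [](std::atomic_ref<T> const& ref) { return ref.load(std::memory_order_acquire); };
  test_acquire_release<T>(store, load);
}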

#endif // TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H