//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H

#include "src/__support/CPP/type_traits/has_unique_object_representations.h"
#include "src/__support/macros/attributes.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace LIBC_NAMESPACE_DECL {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

// These are a Clang extension; see the Clang documentation for more
// information:
// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
enum class MemoryScope : int {
#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
  SYSTEM = __MEMORY_SCOPE_SYSTEM,
  DEVICE = __MEMORY_SCOPE_DEVICE,
#else
  SYSTEM = 0,
  DEVICE = 0,
#endif
};
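
// Example (usage sketch, not part of the upstream header): on targets that
// provide the scoped builtins, a DEVICE-scoped operation only has to be made
// visible to threads on the same device (e.g. one GPU), which can be cheaper
// than SYSTEM scope:
//
//   cpp::Atomic<int> counter(0);
//   counter.fetch_add(1, cpp::MemoryOrder::RELAXED, cpp::MemoryScope::DEVICE);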

template <typename T> struct Atomic {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "atomic<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "atomic<T> in libc only supports types whose values have "
                "unique object representations.");

private:
  // Type conversion helpers to avoid long C++-style casts.
  LIBC_INLINE static int order(MemoryOrder mem_ord) {
    return static_cast<int>(mem_ord);
  }

  LIBC_INLINE static int scope(MemoryScope mem_scope) {
    return static_cast<int>(mem_scope);
  }

  LIBC_INLINE static T *addressof(T &ref) { return __builtin_addressof(ref); }

  // Require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to
  // at least their size to be potentially used lock-free.
  LIBC_INLINE_VAR static constexpr size_t MIN_ALIGNMENT =
      (sizeof(T) & (sizeof(T) - 1)) || (sizeof(T) > 16) ? 0 : sizeof(T);

  LIBC_INLINE_VAR static constexpr size_t ALIGNMENT = alignof(T) > MIN_ALIGNMENT
                                                          ? alignof(T)
                                                          : MIN_ALIGNMENT;
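  //
  // Worked example (values assume common ABIs): sizeof(T) & (sizeof(T) - 1)
  // is zero exactly when sizeof(T) is a power of two. For T = uint64_t,
  // sizeof(T) == 8 passes both checks, so MIN_ALIGNMENT is 8 and the value is
  // at least naturally aligned. For a packed 6-byte struct, 6 & 5 is nonzero,
  // so MIN_ALIGNMENT is 0 and plain alignof(T) is used instead.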

public:
  using value_type = T;

  // We keep the internal value public so that it is addressable. This is
  // useful in places like the Linux futex operations, where we need pointers
  // to the memory of the atomic values. Load and store operations should
  // nevertheless be performed using the atomic methods.
  alignas(ALIGNMENT) value_type val;
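  //
  // Example (hypothetical sketch; libc's real futex wrapper lives elsewhere):
  // a Linux futex wait passes the address of the value straight to the kernel:
  //
  //   cpp::Atomic<uint32_t> futex_word(0);
  //   syscall(SYS_futex, &futex_word.val, FUTEX_WAIT, 0, nullptr);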

  LIBC_INLINE constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}

  LIBC_INLINE Atomic(const Atomic &) = delete;
  LIBC_INLINE Atomic &operator=(const Atomic &) = delete;

  // Atomic load.
  LIBC_INLINE operator T() { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(addressof(val), addressof(res), order(mem_ord),
                         scope(mem_scope));
#else
    __atomic_load(addressof(val), addressof(res), order(mem_ord));
#endif
    return res;
  }

  // Atomic store.
  LIBC_INLINE T operator=(T rhs) {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(addressof(val), addressof(rhs), order(mem_ord),
                          scope(mem_scope));
#else
    __atomic_store(addressof(val), addressof(rhs), order(mem_ord));
#endif
  }
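  //
  // Example (usage sketch): a release store paired with an acquire load
  // publishes everything written before the store to the thread that
  // observes it:
  //
  //   cpp::Atomic<bool> ready(false);
  //   // Producer: write data, then ready.store(true, MemoryOrder::RELEASE);
  //   // Consumer: while (!ready.load(MemoryOrder::ACQUIRE)) {}
  //   //           ...the producer's data is now visible.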

  // Atomic compare exchange.
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(addressof(val), addressof(expected),
                                     addressof(desired), false, order(mem_ord),
                                     order(mem_ord));
  }

  // Atomic compare exchange (separate success and failure memory orders).
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        addressof(val), addressof(expected), addressof(desired), false,
        order(success_order), order(failure_order));
  }

  // Atomic compare exchange (weak version).
  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(addressof(val), addressof(expected),
                                     addressof(desired), true, order(mem_ord),
                                     order(mem_ord));
  }

  // Atomic compare exchange (weak version with separate success and failure
  // memory orders).
  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        addressof(val), addressof(expected), addressof(desired), true,
        order(success_order), order(failure_order));
  }
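  //
  // Example (usage sketch): the canonical CAS loop. On failure, `expected` is
  // updated to the value actually held, so each retry only recomputes the
  // desired value; the weak form may fail spuriously, which is harmless
  // inside a loop:
  //
  //   cpp::Atomic<int> a(0);
  //   int expected = a.load(cpp::MemoryOrder::RELAXED);
  //   while (!a.compare_exchange_weak(expected, expected + 1,
  //                                   cpp::MemoryOrder::ACQ_REL,
  //                                   cpp::MemoryOrder::RELAXED))
  //     ;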

  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(addressof(val), addressof(desired), addressof(ret),
                             order(mem_ord), scope(mem_scope));
#else
    __atomic_exchange(addressof(val), addressof(desired), addressof(ret),
                      order(mem_ord));
#endif
    return ret;
  }

  LIBC_INLINE T
  fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(addressof(val), increment, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_add(addressof(val), increment, order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(addressof(val), mask, order(mem_ord),
                                    scope(mem_scope));
#else
    return __atomic_fetch_or(addressof(val), mask, order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(addressof(val), mask, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_and(addressof(val), mask, order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(addressof(val), decrement, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_sub(addressof(val), decrement, order(mem_ord));
#endif
  }
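  //
  // Example (usage sketch): each fetch_* operation returns the value held
  // before the modification, so a relaxed fetch_add can hand out unique
  // ticket numbers:
  //
  //   cpp::Atomic<unsigned> next_ticket(0);
  //   unsigned mine = next_ticket.fetch_add(1, cpp::MemoryOrder::RELAXED);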

  // Set the value without using an atomic operation. This is useful
  // in initializing atomic values without a constructor.
  LIBC_INLINE void set(T rhs) { val = rhs; }
};

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence(
    MemoryOrder mem_ord,
    [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_thread_fence)
  __scoped_atomic_thread_fence(static_cast<int>(mem_ord),
                               static_cast<int>(mem_scope));
#else
  __atomic_thread_fence(static_cast<int>(mem_ord));
#endif
}
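
// Example (usage sketch): fences can upgrade relaxed operations. The release
// fence below, combined with the consumer's acquire fence, publishes the
// plain write to `data` once the relaxed store to `flag` is observed:
//
//   // Producer:
//   data = 42;                                       // plain store
//   cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
//   flag.store(1, cpp::MemoryOrder::RELAXED);
//
//   // Consumer:
//   while (flag.load(cpp::MemoryOrder::RELAXED) != 1) {}
//   cpp::atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
//   // `data` now reads 42.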

// Establishes memory synchronization ordering of non-atomic and relaxed atomic
// accesses, as instructed by order, between a thread and a signal handler
// executed on the same thread. This is equivalent to atomic_thread_fence,
// except no instructions for memory ordering are issued. Only reordering of
// the instructions by the compiler is suppressed as order instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
#if __has_builtin(__atomic_signal_fence)
  __atomic_signal_fence(static_cast<int>(mem_ord));
#else
  // If the builtin is not available, use an asm statement as a full compiler
  // barrier.
  asm volatile("" ::: "memory");
#endif
}
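
// Example (usage sketch): between a thread and a signal handler running on the
// same thread, only compiler reordering needs to be suppressed, because the
// handler never runs concurrently with the code it interrupts:
//
//   counter = counter + 1;                           // plain store
//   cpp::atomic_signal_fence(cpp::MemoryOrder::RELEASE);
//   handler_may_run.store(true, cpp::MemoryOrder::RELAXED);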

} // namespace cpp
} // namespace LIBC_NAMESPACE_DECL

#endif // LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H