// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//                        Kokkos v. 4.0
//       Copyright (2022) National Technology & Engineering
//               Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
//===---------------------------------------------------------------------===//

#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
#define _LIBCPP___ATOMIC_ATOMIC_REF_H

#include <__assert>
#include <__atomic/atomic_sync.h>
#include <__atomic/check_memory_order.h>
#include <__atomic/memory_order.h>
#include <__atomic/to_gcc_order.h>
#include <__concepts/arithmetic.h>
#include <__concepts/same_as.h>
#include <__config>
#include <__cstddef/byte.h>
#include <__cstddef/ptrdiff_t.h>
#include <__memory/addressof.h>
#include <__type_traits/has_unique_object_representation.h>
#include <__type_traits/is_trivially_copyable.h>
#include <cstdint>
#include <cstring>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_PUSH_MACROS
#include <__undef_macros>

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20

// These types are required to make __atomic_is_always_lock_free work across GCC and Clang.
// The purpose of this trick is to make sure that we provide an object with the correct alignment
// to __atomic_is_always_lock_free, since that answer depends on the alignment.
template <size_t _Alignment>
struct __alignment_checker_type {
  alignas(_Alignment) char __data;
};

template <size_t _Alignment>
struct __get_aligner_instance {
  static constexpr __alignment_checker_type<_Alignment> __instance{};
};

template <class _Tp>
struct __atomic_ref_base {
private:
  _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept {
    _Tp* __ptr = std::addressof(__val);
#  if __has_builtin(__builtin_clear_padding)
    __builtin_clear_padding(__ptr);
#  endif
    return __ptr;
  }

  _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange(
      _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept {
    if constexpr (
#  if __has_builtin(__builtin_clear_padding)
        has_unique_object_representations_v<_Tp> || floating_point<_Tp>
#  else
        true // NOLINT(readability-simplify-boolean-expr)
#  endif
    ) {
      return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure);
    } else { // _Tp has padding bits and __builtin_clear_padding is available
      __clear_padding(*__desired);
      _Tp __copy = *__expected;
      __clear_padding(__copy);
      // The algorithm here is to keep performing `__atomic_compare_exchange` until it either
      // succeeds, or fails because the value representations of the objects involved differ.
      // This is why we loop around __atomic_compare_exchange: we retry until a failure is
      // caused by the value representations of the objects differing, not merely their object
      // representations (which may differ only in padding bits). See the illustrative sketch
      // after this function.
      while (true) {
        _Tp __prev = __copy;
        if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) {
          return true;
        }
        _Tp __curr = __copy;
        if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) {
          // The value representations (without padding bits) do not compare equal ->
          // write the current content of *ptr into *expected.
          std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp));
          return false;
        }
      }
    }
  }
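
  // Illustrative sketch only (the hypothetical type below is not used anywhere in this header):
  // a type such as
  //
  //   struct _WithPadding { char __c; int __i; }; // typically 3 padding bytes after __c
  //
  // can have two object representations that differ only in those padding bytes while the value
  // representations (__c and __i) compare equal. A raw __atomic_compare_exchange may then fail
  // even though the values match, which is why __compare_exchange above retries until a failure
  // is caused by the values themselves differing.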

  friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>;

  // Require types that are 1, 2, 4, 8, or 16 bytes in size to be aligned to at least their size
  // in order to be potentially used lock-free.
  static constexpr size_t __min_alignment = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || (sizeof(_Tp) > 16) ? 0 : sizeof(_Tp);
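  // For illustration (example sizes only): sizeof(_Tp) == 4 yields __min_alignment == 4, while
  // sizeof(_Tp) == 3 (not a power of two) or sizeof(_Tp) == 32 (larger than 16) yields
  // __min_alignment == 0, i.e. no extra alignment requirement beyond alignof(_Tp).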

public:
  using value_type = _Tp;

  static constexpr size_t required_alignment = alignof(_Tp) > __min_alignment ? alignof(_Tp) : __min_alignment;

  // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided,
  // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed
  // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition
  // of atomic_ref's constructor.
  static constexpr bool is_always_lock_free =
      __atomic_always_lock_free(sizeof(_Tp), &__get_aligner_instance<required_alignment>::__instance);

  _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); }

  _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic store operation is invalid");
    __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
    store(__desired);
    return __desired;
  }

  _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
            __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic load operation is invalid");
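    // Load into raw, suitably aligned storage: _Tp is only required to be trivially copyable
    // (it may not be default-constructible), and __atomic_load writes the full object
    // representation into the provided buffer.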
    alignas(_Tp) byte __mem[sizeof(_Tp)];
    auto* __ret = reinterpret_cast<_Tp*>(__mem);
    __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
    return *__ret;
  }

  _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }

  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    alignas(_Tp) byte __mem[sizeof(_Tp)];
    auto* __ret = reinterpret_cast<_Tp*>(__mem);
    __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
    return *__ret;
  }

  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __failure == memory_order::relaxed || __failure == memory_order::consume ||
            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
        "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        true,
        std::__to_gcc_order(__success),
        std::__to_gcc_order(__failure));
  }
  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __failure == memory_order::relaxed || __failure == memory_order::consume ||
            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
        "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        false,
        std::__to_gcc_order(__success),
        std::__to_gcc_order(__failure));
  }

  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        true,
        std::__to_gcc_order(__order),
        std::__to_gcc_failure_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        false,
        std::__to_gcc_order(__order),
        std::__to_gcc_failure_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
            __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic wait operation is invalid");
    std::__atomic_wait(*this, __old, __order);
  }
  _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); }
  _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); }

protected:
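  // The member pointer's pointee type is annotated with required_alignment (checked at runtime
  // by the constructors below), so the __atomic_* builtins can rely on that alignment rather
  // than on alignof(_Tp) alone.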
  using _Aligned_Tp [[__gnu__::__aligned__(required_alignment), __gnu__::__nodebug__]] = _Tp;
  _Aligned_Tp* __ptr_;

  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
};

template <class _Tp>
struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
  static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
    return __a.load(__order);
  }
  static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
    return __a.__ptr_;
  }
};

template <class _Tp>
struct atomic_ref : public __atomic_ref_base<_Tp> {
  static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");

  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;
};

template <class _Tp>
  requires(std::integral<_Tp> && !std::same_as<bool, _Tp>)
struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>;

  using difference_type = __base::value_type;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
};

template <class _Tp>
  requires std::floating_point<_Tp>
struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>;

  using difference_type = __base::value_type;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

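  // For floating-point types, fetch_add and fetch_sub are implemented as compare_exchange_weak
  // loops (the __atomic_fetch_add/__atomic_fetch_sub builtins are not generally usable with
  // floating-point operands): each iteration recomputes the new value from the most recently
  // observed old value.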
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    _Tp __old = this->load(memory_order_relaxed);
    _Tp __new = __old + __arg;
    while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
      __new = __old + __arg;
    }
    return __old;
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    _Tp __old = this->load(memory_order_relaxed);
    _Tp __new = __old - __arg;
    while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
      __new = __old - __arg;
    }
    return __old;
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};

template <class _Tp>
struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp*>;

  using difference_type = ptrdiff_t;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}

  _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

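  // The __atomic_fetch_add/__atomic_fetch_sub builtins do not apply C++ pointer arithmetic to
  // the stored pointer value, so the offset is scaled by sizeof(_Tp) manually to get
  // element-wise increments.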
  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};

_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref);
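
// Basic usage, for exposition only (atomic_ref is provided via <atomic>):
//
//   int __counter = 0;
//   std::atomic_ref<int> __ref(__counter); // __counter must be suitably aligned and outlive __ref
//   __ref.fetch_add(1, std::memory_order_relaxed);
//   int __value = __ref.load();            // __value == 1 if there are no other writers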

#endif // _LIBCPP_STD_VER >= 20

_LIBCPP_END_NAMESPACE_STD

_LIBCPP_POP_MACROS

#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H