//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___ATOMIC_SUPPORT_GCC_H
#define _LIBCPP___ATOMIC_SUPPORT_GCC_H

#include <__atomic/memory_order.h>
#include <__atomic/to_gcc_order.h>
#include <__config>
#include <__memory/addressof.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/is_assignable.h>
#include <__type_traits/remove_const.h>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

//
// This file implements support for GCC-style atomics
//

_LIBCPP_BEGIN_NAMESPACE_STD

// [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
// the default operator= of an object is not volatile-qualified, a
// byte-by-byte copy is required for volatile objects.
template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
  __a_value = __val;
}
template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
  volatile char* __to         = reinterpret_cast<volatile char*>(std::addressof(__a_value));
  volatile char* __end        = __to + sizeof(_Tp);
  volatile const char* __from = reinterpret_cast<volatile const char*>(std::addressof(__val));
  while (__to != __end)
    *__to++ = *__from++;
}

template <typename _Tp>
struct __cxx_atomic_base_impl {
  _LIBCPP_HIDE_FROM_ABI
#ifndef _LIBCPP_CXX03_LANG
  __cxx_atomic_base_impl() _NOEXCEPT = default;
#else
  __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
#endif // _LIBCPP_CXX03_LANG
  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
  _Tp __a_value;
};

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
  __a->__a_value = __val;
}

_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) {
  __atomic_thread_fence(__to_gcc_order(__order));
}

_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) {
  __atomic_signal_fence(__to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
  __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
  __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
  _Tp __ret;
  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_load_inplace(const volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
  __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_load_inplace(const __cxx_atomic_base_impl<_Tp>* __a, _Tp* __dst, memory_order __order) {
  __atomic_load(std::addressof(__a->__a_value), __dst, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
  _Tp __ret;
  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
  _Tp __ret;
  __atomic_exchange(
      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
  _Tp __ret;
  __atomic_exchange(
      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}
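
// Both compare-exchange flavors lower onto the __atomic_compare_exchange builtin;
// its fourth (bool) argument selects the weak form, and the failure ordering is
// translated separately through __to_gcc_failure_order.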
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    volatile __cxx_atomic_base_impl<_Tp>* __a,
    _Tp* __expected,
    _Tp __value,
    memory_order __success,
    memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      false,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      false,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    volatile __cxx_atomic_base_impl<_Tp>* __a,
    _Tp* __expected,
    _Tp __value,
    memory_order __success,
    memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      true,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      true,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}
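
// For pointer types, fetch_add/fetch_sub should behave like ordinary pointer
// arithmetic, but the GCC __atomic_fetch_add/__atomic_fetch_sub builtins do not
// scale pointer operands by the pointee size, so __skip_amt supplies that scaling
// factor for the delta.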
template <typename _Tp>
struct __skip_amt {
  enum { value = 1 };
};

template <typename _Tp>
struct __skip_amt<_Tp*> {
  enum { value = sizeof(_Tp) };
};

// FIXME: Haven't figured out what the spec says about using arrays with
// atomic_fetch_add. Force a failure rather than creating bad behavior.
template <typename _Tp>
struct __skip_amt<_Tp[]> {};
template <typename _Tp, int n>
struct __skip_amt<_Tp[n]> {};

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

#define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___ATOMIC_SUPPORT_GCC_H