xref: /openbsd-src/gnu/llvm/compiler-rt/lib/scudo/standalone/atomic_helpers.h (revision d89ec533011f513df1010f142a111086a0785f09)
13cab2bb3Spatrick //===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
23cab2bb3Spatrick //
33cab2bb3Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
43cab2bb3Spatrick // See https://llvm.org/LICENSE.txt for license information.
53cab2bb3Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
63cab2bb3Spatrick //
73cab2bb3Spatrick //===----------------------------------------------------------------------===//
83cab2bb3Spatrick 
93cab2bb3Spatrick #ifndef SCUDO_ATOMIC_H_
103cab2bb3Spatrick #define SCUDO_ATOMIC_H_
113cab2bb3Spatrick 
123cab2bb3Spatrick #include "internal_defs.h"
133cab2bb3Spatrick 
143cab2bb3Spatrick namespace scudo {
153cab2bb3Spatrick 
// Memory-ordering constants accepted by the atomic_* helpers below. The
// enumerator values are chosen to match the compiler's __ATOMIC_* built-in
// constants so they can be passed straight through to the __atomic_*
// builtins without translation.
enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
// Compile-time check that the enumerators above stay in sync with the
// compiler-provided __ATOMIC_* constants.
static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
static_assert(memory_order_release == __ATOMIC_RELEASE, "");
static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
303cab2bb3Spatrick 
// Atomic cell holding a u8. Access only through the atomic_* helpers below,
// never directly (hence the field name).
struct atomic_u8 {
  typedef u8 Type;
  volatile Type ValDoNotUse;
};
353cab2bb3Spatrick 
// Atomic cell holding a u16. Access only through the atomic_* helpers below.
struct atomic_u16 {
  typedef u16 Type;
  volatile Type ValDoNotUse;
};
403cab2bb3Spatrick 
// Atomic cell holding a signed s32. Access only through the atomic_* helpers
// below.
struct atomic_s32 {
  typedef s32 Type;
  volatile Type ValDoNotUse;
};
453cab2bb3Spatrick 
// Atomic cell holding a u32. Access only through the atomic_* helpers below.
struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};
503cab2bb3Spatrick 
// Atomic cell holding a u64. Access only through the atomic_* helpers below.
struct atomic_u64 {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes, but the
  // __atomic_* builtins require natural alignment, so force it here.
  alignas(8) volatile Type ValDoNotUse;
};
563cab2bb3Spatrick 
// Atomic cell holding a pointer-sized uptr. Access only through the atomic_*
// helpers below.
struct atomic_uptr {
  typedef uptr Type;
  volatile Type ValDoNotUse;
};
613cab2bb3Spatrick 
623cab2bb3Spatrick template <typename T>
atomic_load(const volatile T * A,memory_order MO)633cab2bb3Spatrick inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
643cab2bb3Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
653cab2bb3Spatrick   typename T::Type V;
663cab2bb3Spatrick   __atomic_load(&A->ValDoNotUse, &V, MO);
673cab2bb3Spatrick   return V;
683cab2bb3Spatrick }
693cab2bb3Spatrick 
703cab2bb3Spatrick template <typename T>
atomic_store(volatile T * A,typename T::Type V,memory_order MO)713cab2bb3Spatrick inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
723cab2bb3Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
733cab2bb3Spatrick   __atomic_store(&A->ValDoNotUse, &V, MO);
743cab2bb3Spatrick }
753cab2bb3Spatrick 
// Full memory barrier. The requested ordering is ignored: a sequentially
// consistent fence is always issued via __sync_synchronize().
inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
773cab2bb3Spatrick 
783cab2bb3Spatrick template <typename T>
atomic_fetch_add(volatile T * A,typename T::Type V,memory_order MO)793cab2bb3Spatrick inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
803cab2bb3Spatrick                                          memory_order MO) {
813cab2bb3Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
823cab2bb3Spatrick   return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
833cab2bb3Spatrick }
843cab2bb3Spatrick 
853cab2bb3Spatrick template <typename T>
atomic_fetch_sub(volatile T * A,typename T::Type V,memory_order MO)863cab2bb3Spatrick inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
873cab2bb3Spatrick                                          memory_order MO) {
883cab2bb3Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
893cab2bb3Spatrick   return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
903cab2bb3Spatrick }
913cab2bb3Spatrick 
923cab2bb3Spatrick template <typename T>
atomic_fetch_and(volatile T * A,typename T::Type V,memory_order MO)93*d89ec533Spatrick inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
94*d89ec533Spatrick                                          memory_order MO) {
95*d89ec533Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
96*d89ec533Spatrick   return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
97*d89ec533Spatrick }
98*d89ec533Spatrick 
99*d89ec533Spatrick template <typename T>
atomic_fetch_or(volatile T * A,typename T::Type V,memory_order MO)100*d89ec533Spatrick inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
101*d89ec533Spatrick                                         memory_order MO) {
102*d89ec533Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
103*d89ec533Spatrick   return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
104*d89ec533Spatrick }
105*d89ec533Spatrick 
106*d89ec533Spatrick template <typename T>
atomic_exchange(volatile T * A,typename T::Type V,memory_order MO)1073cab2bb3Spatrick inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
1083cab2bb3Spatrick                                         memory_order MO) {
1093cab2bb3Spatrick   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
1103cab2bb3Spatrick   typename T::Type R;
1113cab2bb3Spatrick   __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
1123cab2bb3Spatrick   return R;
1133cab2bb3Spatrick }
1143cab2bb3Spatrick 
1153cab2bb3Spatrick template <typename T>
atomic_compare_exchange_strong(volatile T * A,typename T::Type * Cmp,typename T::Type Xchg,memory_order MO)1163cab2bb3Spatrick inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
1173cab2bb3Spatrick                                            typename T::Type Xchg,
1183cab2bb3Spatrick                                            memory_order MO) {
1193cab2bb3Spatrick   return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
1203cab2bb3Spatrick                                    __ATOMIC_RELAXED);
1213cab2bb3Spatrick }
1223cab2bb3Spatrick 
1233cab2bb3Spatrick // Clutter-reducing helpers.
1243cab2bb3Spatrick 
1253cab2bb3Spatrick template <typename T>
atomic_load_relaxed(const volatile T * A)1263cab2bb3Spatrick inline typename T::Type atomic_load_relaxed(const volatile T *A) {
1273cab2bb3Spatrick   return atomic_load(A, memory_order_relaxed);
1283cab2bb3Spatrick }
1293cab2bb3Spatrick 
1303cab2bb3Spatrick template <typename T>
atomic_store_relaxed(volatile T * A,typename T::Type V)1313cab2bb3Spatrick inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
1323cab2bb3Spatrick   atomic_store(A, V, memory_order_relaxed);
1333cab2bb3Spatrick }
1343cab2bb3Spatrick 
1353cab2bb3Spatrick template <typename T>
atomic_compare_exchange(volatile T * A,typename T::Type Cmp,typename T::Type Xchg)1363cab2bb3Spatrick inline typename T::Type atomic_compare_exchange(volatile T *A,
1373cab2bb3Spatrick                                                 typename T::Type Cmp,
1383cab2bb3Spatrick                                                 typename T::Type Xchg) {
1393cab2bb3Spatrick   atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
1403cab2bb3Spatrick   return Cmp;
1413cab2bb3Spatrick }
1423cab2bb3Spatrick 
1433cab2bb3Spatrick } // namespace scudo
1443cab2bb3Spatrick 
1453cab2bb3Spatrick #endif // SCUDO_ATOMIC_H_
146