//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  atomic.c defines a set of functions for performing atomic accesses on
//  arbitrary-sized memory locations.  This design uses locks that should
//  be fast in the uncontended case, for two reasons:
//
//  1) This code must work with C programs that do not link to anything
//     (including pthreads) and so it should not depend on any pthread
//     functions.
//  2) Atomic operations, rather than explicit mutexes, are most commonly used
//     in code where contended operations are rare.
//
//  To avoid needing a per-object lock, this code allocates an array of
//  locks and hashes the object pointers to find the one that it should use.
//  For operations that must be atomic on two locations, the lower lock is
//  always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//
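
// Illustrative sketch (struct widget is a hypothetical example type): when an
// atomic access cannot be lowered to a native instruction, the compiler emits
// a call to the generic entry points defined below.  For example,
//
//   struct widget { char bytes[24]; };   // hypothetical 24-byte type
//   struct widget src, dst;
//   __atomic_load(&src, &dst, __ATOMIC_SEQ_CST);
//
// lowers to the __atomic_load libcall, which is provided here by
// __atomic_load_c (renamed via #pragma redefine_extname below) and takes the
// locked slow path.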

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "assembly.h"

// We use __builtin_mem* here to avoid dependencies on libc-provided headers.
#define memcpy __builtin_memcpy
#define memcmp __builtin_memcmp

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(              \
    __atomic_compare_exchange)
#pragma redefine_extname __atomic_is_lock_free_c SYMBOL_NAME(                  \
    __atomic_is_lock_free)

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
_Static_assert(__atomic_always_lock_free(sizeof(uintptr_t), 0),
               "Implementation assumes lock-free pointer-size cmpxchg");
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash.
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object.
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use.
  return locks + (hash & SPINLOCK_MASK);
}
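
// Worked example (assuming the default SPINLOCK_COUNT of 1 << 10, so
// SPINLOCK_MASK == 0x3ff): for ptr == (void *)0x12345678,
//
//   hash = 0x12345678 >> 4     == 0x1234567
//   low  = hash & 0x3ff        == 0x167
//   hash = (hash >> 16) ^ low  == 0x123 ^ 0x167 == 0x44
//
// so the operation uses locks[0x44].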

/// Macros for determining whether a size is lock free.
#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p)                  \
  (__atomic_always_lock_free(size, p) ||                                       \
   (__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0))
#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p)
#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p)
#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p)
#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p)
#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p)
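
// For example, on a typical 64-bit target IS_LOCK_FREE_4(p) is satisfied
// either by the compile-time __atomic_always_lock_free(4, p) check, or by
// __atomic_always_lock_free(4, 0) combined with the run-time test that p is
// 4-byte aligned.  IS_LOCK_FREE_16(p) generally holds only on targets with a
// native 16-byte compare-and-swap.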

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define TRY_LOCK_FREE_CASE(n, type, ptr)                                       \
  case n:                                                                      \
    if (IS_LOCK_FREE_##n(ptr)) {                                               \
      LOCK_FREE_ACTION(type);                                                  \
    }                                                                          \
    break;
#ifdef __SIZEOF_INT128__
#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p)
#else
#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */
#endif

#define LOCK_FREE_CASES(ptr)                                                   \
  do {                                                                         \
    switch (size) {                                                            \
      TRY_LOCK_FREE_CASE(1, uint8_t, ptr)                                      \
      TRY_LOCK_FREE_CASE(2, uint16_t, ptr)                                     \
      TRY_LOCK_FREE_CASE(4, uint32_t, ptr)                                     \
      TRY_LOCK_FREE_CASE(8, uint64_t, ptr)                                     \
      TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */        \
    default:                                                                   \
      break;                                                                   \
    }                                                                          \
  } while (0)
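
// Expansion sketch: with LOCK_FREE_ACTION defined as in
// __atomic_is_lock_free_c below, LOCK_FREE_CASES(ptr) becomes (abridged)
//
//   switch (size) {
//   case 4:
//     if (IS_LOCK_FREE_4(ptr)) {
//       return true;
//     }
//     break;
//   /* ... other sizes ... */
//   default:
//     break;
//   }
//
// Note that `size` is deliberately captured from the enclosing function.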

/// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(size_t size, void *ptr) {
#define LOCK_FREE_ACTION(type) return true;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  return false;
}
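
// Usage sketch: the compiler folds __atomic_is_lock_free calls it can answer
// statically, and emits a call to this function (renamed by the pragma above)
// only for the remaining cases, e.g.
//
//   _Atomic long double x;                      // hypothetical object
//   if (__atomic_is_lock_free(sizeof(x), &x))   // may become a runtime call
//     /* lock-free path */;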

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            \
  return;
  LOCK_FREE_CASES(src);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              \
  return;
  LOCK_FREE_CASES(dest);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they differ, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                 \
  return __c11_atomic_compare_exchange_strong(                                 \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       \
      failure)
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
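
// Usage sketch (struct widget and recompute are hypothetical): a classic
// compare-exchange retry loop over a type too large to be lock-free reaches
// the locked path above via the __atomic_compare_exchange libcall:
//
//   struct widget { char bytes[24]; };
//   struct widget w, expected, desired;
//   while (!__atomic_compare_exchange(&w, &expected, &desired,
//                                     false, __ATOMIC_SEQ_CST,
//                                     __ATOMIC_SEQ_CST))
//     recompute(&desired, &expected);  // expected was refreshed from w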

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *(type *)old =                                                               \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        \
  return;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
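
// For instance, a sized access such as
//
//   uint32_t v = __atomic_load_n(&u32, __ATOMIC_ACQUIRE);
//
// may, on targets without suitable native atomics, be lowered to a call to
// __atomic_load_4; the lockfree(...) test in each specialisation below then
// still selects the lock-free path at run time when the pointer's alignment
// allows it.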
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                  \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_load_##n(type *src, int model) {                               \
    if (lockfree(src))                                                         \
      return __c11_atomic_load((_Atomic(type) *)src, model);                   \
    Lock *l = lock_for_pointer(src);                                           \
    lock(l);                                                                   \
    type val = *src;                                                           \
    unlock(l);                                                                 \
    return val;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  void __atomic_store_##n(type *dest, type val, int model) {                   \
    if (lockfree(dest)) {                                                      \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   \
      return;                                                                  \
    }                                                                          \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return;                                                                    \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_exchange_##n(type *dest, type val, int model) {                \
    if (lockfree(dest))                                                        \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    type tmp = *dest;                                                          \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
                                     int success, int failure) {               \
    if (lockfree(ptr))                                                         \
      return __c11_atomic_compare_exchange_strong(                             \
          (_Atomic(type) *)ptr, expected, desired, success, failure);          \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    if (*ptr == *expected) {                                                   \
      *ptr = desired;                                                          \
      unlock(l);                                                               \
      return true;                                                             \
    }                                                                          \
    *expected = *ptr;                                                          \
    unlock(l);                                                                 \
    return false;                                                              \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op)                              \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         \
    if (lockfree(ptr))                                                         \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    type tmp = *ptr;                                                           \
    *ptr = tmp op val;                                                         \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }

#define ATOMIC_RMW_NAND(n, lockfree, type)                                     \
  type __atomic_fetch_nand_##n(type *ptr, type val, int model) {               \
    if (lockfree(ptr))                                                         \
      return __c11_atomic_fetch_nand((_Atomic(type) *)ptr, val, model);        \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    type tmp = *ptr;                                                           \
    *ptr = ~(tmp & val);                                                       \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
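
// Semantics check (illustrative): __atomic_fetch_nand_N stores ~(old & val)
// and returns the old value.  E.g. for uint8_t with *ptr == 0xff and
// val == 0x0f, the new value is (uint8_t)~(0xff & 0x0f) == 0xf0, and 0xff is
// returned.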

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
// Allow building with clang versions (pre-14) that lack the
// __c11_atomic_fetch_nand builtin.
#if __has_builtin(__c11_atomic_fetch_nand)
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#endif
374