//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

namespace __sanitizer {

INLINE void atomic_signal_fence(memory_order) {
  __asm__ __volatile__("" ::: "memory");
}

INLINE void atomic_thread_fence(memory_order) {
  __sync_synchronize();
}

INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
#endif
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
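  // For non-relaxed stores, the compiler-only fences below keep the compiler
  // from moving the store; the trailing atomic_thread_fence() supplies the
  // hardware barrier required for seq_cst. This appears sufficient on
  // strongly-ordered targets (x86/x86_64, where plain aligned stores already
  // have release semantics), but presumably not on weakly-ordered targets,
  // where a release store would also need a hardware fence.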
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}

template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, v);
}

template<typename T>
INLINE typename T::Type atomic_fetch_sub(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, -v);
}

template<typename T>
INLINE typename T::Type atomic_exchange(volatile T *a,
    typename T::Type v, memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  if (mo & (memory_order_release | memory_order_acq_rel
      | memory_order_seq_cst))
    __sync_synchronize();
  v = __sync_lock_test_and_set(&a->val_dont_use, v);
  if (mo == memory_order_seq_cst)
    __sync_synchronize();
  return v;
}

template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
                                           typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_CLANG_H
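
// Illustrative usage sketch (not part of this header). It assumes the
// atomic_uint32_t wrapper declared in sanitizer_atomic.h, which provides a
// `typedef u32 Type;` and the `val_dont_use` member accessed above:
//
//   atomic_uint32_t refcount;
//   atomic_store(&refcount, 1, memory_order_relaxed);
//   u32 prev = atomic_fetch_add(&refcount, 1, memory_order_relaxed);
//   u32 expected = 2;
//   if (!atomic_compare_exchange_strong(&refcount, &expected, 3,
//                                       memory_order_acq_rel)) {
//     // On failure, `expected` has been updated to the value actually
//     // observed in `refcount`.
//   }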