//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
#define SANITIZER_ATOMIC_CLANG_MIPS_H

namespace __sanitizer {

// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
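// The spin lock below is a test-and-test-and-set lock: __sync_lock_test_and_set
// performs the atomic acquire, and the inner loop spins on a plain load until
// the lock appears free before the next atomic attempt.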
static void __spin_lock(volatile int *lock) {
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
  int lock;
  char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0, {0}};
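// A minimal compile-time sanity check (an illustrative addition, not in the
// upstream file; it assumes C++11 static_assert is available): the padded
// struct is meant to fill exactly one 32-byte MIPS cache line.
static_assert(sizeof(lock) == 32,
              "lock is expected to occupy one 32-byte cache line");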

template <>
INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
                                              atomic_uint64_t::Type val,
                                              memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  atomic_uint64_t::Type ret;

  __spin_lock(&lock.lock);
  ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
  ptr->val_dont_use = ret + val;
  __spin_unlock(&lock.lock);

  return ret;
}

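// 64-bit atomic_fetch_sub is expressed in terms of atomic_fetch_add: negating
// the unsigned value wraps modulo 2^64, so adding -val under the same spin
// lock subtracts val from the stored value.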
template <>
INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
                                              atomic_uint64_t::Type val,
                                              memory_order mo) {
  return atomic_fetch_add(ptr, -val, mo);
}

template <>
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
                                           atomic_uint64_t::Type *cmp,
                                           atomic_uint64_t::Type xchg,
                                           memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  typedef atomic_uint64_t::Type Type;
  Type cmpv = *cmp;
  Type prev;
  bool ret = false;

  __spin_lock(&lock.lock);
  prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
  if (prev == cmpv) {
    ret = true;
    ptr->val_dont_use = xchg;
  }
  __spin_unlock(&lock.lock);

  return ret;
}

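// 64-bit atomic_load reuses atomic_fetch_add with a zero addend: the value is
// read under the same spin lock and returned unchanged.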
template <>
INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
                                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  atomic_uint64_t::Type zero = 0;
  volatile atomic_uint64_t *Newptr =
      const_cast<volatile atomic_uint64_t *>(ptr);
  return atomic_fetch_add(Newptr, zero, mo);
}

template <>
INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  __spin_lock(&lock.lock);
  ptr->val_dont_use = v;
  __spin_unlock(&lock.lock);
}

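// Usage sketch (an illustrative addition, not part of the upstream interface):
// a typical retry loop built on the 64-bit helpers above. The helper name
// atomic_fetch_max_example is hypothetical; it raises *ptr to at least v and
// returns the previously observed value.
INLINE atomic_uint64_t::Type atomic_fetch_max_example(
    volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v) {
  for (;;) {
    atomic_uint64_t::Type cur = atomic_load(ptr, memory_order_relaxed);
    if (cur >= v)
      return cur;  // Already at least v; nothing to store.
    if (atomic_compare_exchange_strong(ptr, &cur, v, memory_order_relaxed))
      return cur;  // We installed v; cur holds the value it replaced.
    // Another thread changed the value between the load and the CAS; retry.
  }
}
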
}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_CLANG_MIPS_H