//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {


// Generic fallback: there is no processor yield/pause instruction to emit
// here, so this is only a compiler barrier; the requested count is ignored.
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}

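// Illustrative sketch (added example, not part of the original file): spin
// loops in the runtime call proc_yield between polls so the code stays
// uniform with targets that emit a real pause/yield instruction; on these
// "other" platforms it is only a compiler barrier. The helper name
// example_spin_until and its argument are hypothetical.
INLINE void example_spin_until(const volatile int *done) {
  while (*done == 0)
    proc_yield(10);  // no hardware yield here; the barrier only constrains the compiler
}
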
template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    // 64-bit load on 32-bit platform.
    // Gross, but simple and reliable.
    // Assume that it is not in read-only memory.
    v = __sync_fetch_and_add(
        const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
  }
  return v;
}

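// Illustrative sketch (added example, not part of the original file): a
// 64-bit relaxed load. On a 32-bit target this takes the
// __sync_fetch_and_add(ptr, 0) branch above, so the read is still atomic but
// is performed as a read-modify-write (hence the "not in read-only memory"
// caveat). atomic_uint64_t and u64 come from sanitizer_atomic.h and
// sanitizer_internal_defs.h; the helper name is hypothetical.
INLINE u64 example_read_counter(const volatile atomic_uint64_t *counter) {
  return atomic_load(counter, memory_order_relaxed);
}
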
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    // Gross, but simple and reliable.
    // CAS until either our value is installed or another thread has already
    // stored exactly v, in which case this store can be treated as done.
    typename T::Type cmp = a->val_dont_use;
    typename T::Type cur;
    for (;;) {
      cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
      if (cur == cmp || cur == v)
        break;
      cmp = cur;
    }
  }
}

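// Illustrative sketch (added example, not part of the original file):
// release/acquire publication built from the primitives above. The release
// store of the flag pairs with an acquire load of the same flag in the
// consumer; both helper names are hypothetical.
INLINE void example_publish(volatile atomic_uint32_t *payload,
                            volatile atomic_uint32_t *ready, u32 v) {
  atomic_store(payload, v, memory_order_relaxed);  // write the data first
  atomic_store(ready, 1, memory_order_release);    // then signal readiness
}

INLINE u32 example_consume(const volatile atomic_uint32_t *payload,
                           const volatile atomic_uint32_t *ready) {
  while (atomic_load(ready, memory_order_acquire) == 0)
    proc_yield(10);  // busy-wait until the producer releases the flag
  return atomic_load(payload, memory_order_relaxed);
}
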
}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H