xref: /netbsd-src/external/apache2/llvm/dist/libcxx/src/memory.cpp (revision 4d6fc14bc9b0c5bf3e30be318c143ee82cadd108)
1*4d6fc14bSjoerg //===------------------------ memory.cpp ----------------------------------===//
2*4d6fc14bSjoerg //
3*4d6fc14bSjoerg // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*4d6fc14bSjoerg // See https://llvm.org/LICENSE.txt for license information.
5*4d6fc14bSjoerg // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*4d6fc14bSjoerg //
7*4d6fc14bSjoerg //===----------------------------------------------------------------------===//
8*4d6fc14bSjoerg 
9*4d6fc14bSjoerg #include "memory"
10*4d6fc14bSjoerg #ifndef _LIBCPP_HAS_NO_THREADS
11*4d6fc14bSjoerg #include "mutex"
12*4d6fc14bSjoerg #include "thread"
13*4d6fc14bSjoerg #if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
14*4d6fc14bSjoerg #pragma comment(lib, "pthread")
15*4d6fc14bSjoerg #endif
16*4d6fc14bSjoerg #endif
17*4d6fc14bSjoerg #include "include/atomic_support.h"
18*4d6fc14bSjoerg 
19*4d6fc14bSjoerg _LIBCPP_BEGIN_NAMESPACE_STD
20*4d6fc14bSjoerg 
// Out-of-line definition of the std::allocator_arg tag object declared in <memory>.
const allocator_arg_t allocator_arg = allocator_arg_t();
22*4d6fc14bSjoerg 
// Defined out of line so the class's vtable/RTTI are emitted in the library
// rather than in every translation unit that uses bad_weak_ptr.
bad_weak_ptr::~bad_weak_ptr() noexcept {}
24*4d6fc14bSjoerg 
// Diagnostic message for the exception thrown when constructing a shared_ptr
// from an expired weak_ptr.  Returns a string literal, so the pointer stays
// valid for the lifetime of the program.
const char*
bad_weak_ptr::what() const noexcept
{
    return "bad_weak_ptr";
}
30*4d6fc14bSjoerg 
// Empty, but defined out of line on purpose: it anchors the polymorphic
// class's vtable in this translation unit.
__shared_count::~__shared_count()
{
}
34*4d6fc14bSjoerg 
// Empty out-of-line destructor; anchors __shared_weak_count's vtable here.
__shared_weak_count::~__shared_weak_count()
{
}
38*4d6fc14bSjoerg 
39*4d6fc14bSjoerg #if defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
// Atomically bump the strong-reference count.  Compiled only under the
// legacy-ABI macro (see surrounding #if): kept so older binaries that call
// this out-of-line symbol keep linking.
void
__shared_count::__add_shared() noexcept
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}
45*4d6fc14bSjoerg 
46*4d6fc14bSjoerg bool
__release_shared()47*4d6fc14bSjoerg __shared_count::__release_shared() noexcept
48*4d6fc14bSjoerg {
49*4d6fc14bSjoerg     if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
50*4d6fc14bSjoerg     {
51*4d6fc14bSjoerg         __on_zero_shared();
52*4d6fc14bSjoerg         return true;
53*4d6fc14bSjoerg     }
54*4d6fc14bSjoerg     return false;
55*4d6fc14bSjoerg }
56*4d6fc14bSjoerg 
// Legacy-ABI out-of-line forwarder to the base-class strong-count increment.
void
__shared_weak_count::__add_shared() noexcept
{
    __shared_count::__add_shared();
}
62*4d6fc14bSjoerg 
// Atomically bump the weak-reference count (legacy-ABI out-of-line copy).
void
__shared_weak_count::__add_weak() noexcept
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}
68*4d6fc14bSjoerg 
69*4d6fc14bSjoerg void
__release_shared()70*4d6fc14bSjoerg __shared_weak_count::__release_shared() noexcept
71*4d6fc14bSjoerg {
72*4d6fc14bSjoerg     if (__shared_count::__release_shared())
73*4d6fc14bSjoerg         __release_weak();
74*4d6fc14bSjoerg }
75*4d6fc14bSjoerg 
76*4d6fc14bSjoerg #endif // _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS
77*4d6fc14bSjoerg 
// Drop one weak reference; destroys the control block when the last weak
// reference goes away.  The fast path avoids an atomic RMW entirely.
void
__shared_weak_count::__release_weak() noexcept
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case.  Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE?  Yes.  During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once.  The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}
112*4d6fc14bSjoerg 
// Support for weak_ptr::lock(): try to acquire a strong reference.
// Returns this on success, or nullptr if the object has already expired
// (__shared_owners_ == -1 means zero strong owners; the count is biased
// by one, as seen in __release_shared above).
__shared_weak_count*
__shared_weak_count::lock() noexcept
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        // CAS loop: only increment if the count is still the value we read,
        // so we can never resurrect an object that expired concurrently.
        // On failure object_owners is reloaded and the check repeats.
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}
126*4d6fc14bSjoerg 
// Base implementation used by get_deleter(): no deleter is stored at this
// level, so nullptr is returned regardless of the requested type.
const void*
__shared_weak_count::__get_deleter(const type_info&) const noexcept
{
    return nullptr;
}
132*4d6fc14bSjoerg 
133*4d6fc14bSjoerg #if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
134*4d6fc14bSjoerg 
// Backing storage for the mutex pool used by the atomic shared_ptr free
// functions.  The count must stay a power of two: __get_sp_mut selects a
// slot by masking a hash with (__sp_mut_count - 1).
_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};
143*4d6fc14bSjoerg 
// Stash the address of a backing __libcpp_mutex_t in the opaque __lx slot.
// constexpr so the pool in __get_sp_mut can be constant-initialized.
_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) noexcept
   : __lx(p)
{
}
148*4d6fc14bSjoerg 
149*4d6fc14bSjoerg void
lock()150*4d6fc14bSjoerg __sp_mut::lock() noexcept
151*4d6fc14bSjoerg {
152*4d6fc14bSjoerg     auto m = static_cast<__libcpp_mutex_t*>(__lx);
153*4d6fc14bSjoerg     unsigned count = 0;
154*4d6fc14bSjoerg     while (!__libcpp_mutex_trylock(m))
155*4d6fc14bSjoerg     {
156*4d6fc14bSjoerg         if (++count > 16)
157*4d6fc14bSjoerg         {
158*4d6fc14bSjoerg             __libcpp_mutex_lock(m);
159*4d6fc14bSjoerg             break;
160*4d6fc14bSjoerg         }
161*4d6fc14bSjoerg         this_thread::yield();
162*4d6fc14bSjoerg     }
163*4d6fc14bSjoerg }
164*4d6fc14bSjoerg 
165*4d6fc14bSjoerg void
unlock()166*4d6fc14bSjoerg __sp_mut::unlock() noexcept
167*4d6fc14bSjoerg {
168*4d6fc14bSjoerg     __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
169*4d6fc14bSjoerg }
170*4d6fc14bSjoerg 
// Map an object address to one of the 16 pooled mutexes that serialize the
// std::atomic_...(shared_ptr) free functions.  Distinct addresses may hash
// to the same mutex; that only adds contention, never affects correctness.
__sp_mut&
__get_sp_mut(const void* p)
{
    // Constant-initialized thanks to __sp_mut's constexpr constructor, so
    // this function-local static should carry no runtime init cost.
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    // Mask works because __sp_mut_count is a power of two.
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}
183*4d6fc14bSjoerg 
184*4d6fc14bSjoerg #endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
185*4d6fc14bSjoerg 
// C++11 garbage-collection support API ([util.dynamic.safety]).  This
// implementation does no GC bookkeeping, so the call is a no-op.
void
declare_reachable(void*)
{
}
190*4d6fc14bSjoerg 
// GC support API: no tracing collector exists here, so this is a no-op.
void
declare_no_pointers(char*, size_t)
{
}
195*4d6fc14bSjoerg 
// GC support API: companion to declare_no_pointers; likewise a no-op.
void
undeclare_no_pointers(char*, size_t)
{
}
200*4d6fc14bSjoerg 
// Backend for undeclare_reachable().  Since declare_reachable() keeps no
// state in this implementation, the pointer is simply handed back unchanged.
void*
__undeclare_reachable(void* p)
{
    void* unchanged = p;
    return unchanged;
}
206*4d6fc14bSjoerg 
// std::align: if a size-byte object of the given alignment fits in the
// buffer [ptr, ptr + space), round ptr up to the alignment, shrink space by
// the adjustment, and return the aligned pointer.  Otherwise return nullptr
// and leave ptr/space untouched.  alignment must be a power of two.
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    if (size > space)
        return nullptr;
    char* const base = static_cast<char*>(ptr);
    // Round up: add (alignment - 1) then clear the low bits with the mask
    // -alignment (two's complement of a power of two).
    char* const aligned = reinterpret_cast<char*>(
        (reinterpret_cast<size_t>(base) + (alignment - 1)) & -alignment);
    size_t const adjustment = static_cast<size_t>(aligned - base);
    if (adjustment > space - size)
        return nullptr;
    ptr = aligned;
    space -= adjustment;
    return aligned;
}
225*4d6fc14bSjoerg 
226*4d6fc14bSjoerg _LIBCPP_END_NAMESPACE_STD
227