//===------------------------ memory.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() noexcept {}

const char*
bad_weak_ptr::what() const noexcept
{
    return "bad_weak_ptr";
}

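// The destructors are defined out of line so that the vtables for
// __shared_count and __shared_weak_count are emitted here in the library
// rather than in every translation unit; they are part of the stable ABI.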
__shared_count::~__shared_count()
{
}

__shared_weak_count::~__shared_weak_count()
{
}

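// The definitions below exist only for backward compatibility with binaries
// built against older libc++ headers, where these member functions were not
// defined inline.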
#if defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
void
__shared_count::__add_shared() noexcept
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}

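// __shared_owners_ holds the number of shared owners minus one, so a
// decrement that reaches -1 means the last shared owner has gone away.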
bool
__shared_count::__release_shared() noexcept
{
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

void
__shared_weak_count::__add_shared() noexcept
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() noexcept
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() noexcept
{
    if (__shared_count::__release_shared())
        __release_weak();
}

#endif // _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS

void
__shared_weak_count::__release_weak() noexcept
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case. Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE? Yes. During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once. The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

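// Used by weak_ptr::lock(): bump the shared count with a CAS loop, but only
// while the count is not -1 (i.e. the object has not yet been destroyed).
// Returns this on success, or nullptr if the object has already expired.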
__shared_weak_count*
__shared_weak_count::lock() noexcept
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}

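// Base implementation: this control block stores no deleter. Control blocks
// that do store one override this function.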
const void*
__shared_weak_count::__get_deleter(const type_info&) const noexcept
{
    return nullptr;
}

#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

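// Backing storage for the pool of mutexes handed out by __get_sp_mut below.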
_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) noexcept
    : __lx(p)
{
}

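// Spin briefly on trylock, yielding between attempts; after 16 failed tries
// fall back to a blocking lock so we don't keep burning CPU under contention.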
void
__sp_mut::lock() noexcept
{
    auto m = static_cast<__libcpp_mutex_t*>(__lx);
    unsigned count = 0;
    while (!__libcpp_mutex_trylock(m))
    {
        if (++count > 16)
        {
            __libcpp_mutex_lock(m);
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() noexcept
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}

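// Map an object's address to one of the 16 mutexes above; the atomic free
// functions for shared_ptr use this to serialize access per object.
// __sp_mut_count is a power of two, so the mask below is a cheap modulo.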
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

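// [util.dynamic.safety] pointer safety support. libc++ never collects
// unreachable memory, so these functions have no effect.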
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

void*
__undeclare_reachable(void* p)
{
    return p;
}

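// std::align: round ptr up to the requested alignment if an object of `size`
// bytes still fits in `space`. On success, ptr is advanced to the aligned
// address, space is reduced by the adjustment, and the aligned pointer is
// returned; otherwise nullptr is returned and ptr/space are left unchanged.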
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}

_LIBCPP_END_NAMESPACE_STD