// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>

// Annotations for the custom locking in atomic<shared_ptr<T>>.
#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
#include <sanitizer/tsan_interface.h>
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
  __tsan_mutex_destroy(X, __tsan_mutex_not_static)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
  __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
    __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
  __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
#else
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @relates shared_ptr
   * @{
   */

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };
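
  // Implementation note: the out-of-line _Sp_locker definitions (in the
  // compiled library, not in this header) hash the shared_ptr's address to
  // pick a mutex from a small fixed pool; _M_key1/_M_key2 record which pool
  // entries were taken so the destructor unlocks the same ones, and the
  // two-pointer constructor locks both entries in a consistent order to
  // avoid deadlock.  A minimal sketch of the idea, using a hypothetical
  // helper for exposition (the real pool lives in libstdc++):
  //
  //   #include <functional>
  //   #include <mutex>
  //
  //   std::mutex&
  //   example_pool_mutex(const void* p) // hypothetical, for exposition
  //   {
  //     static std::mutex pool[16];
  //     return pool[std::hash<const void*>{}(p) % 16];
  //   }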

  /// @endcond

  /**
   *  @brief  Report whether shared_ptr atomic operations are lock-free.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return True if atomic access to @c *__p is lock-free, false otherwise.
   *  @{
  */
  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>* __p)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}
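
  // For example (illustrative only, not part of the header):
  //
  //   std::shared_ptr<int> sp = std::make_shared<int>(1);
  //   bool lf = std::atomic_is_lock_free(&sp);
  //
  // When gthreads is active this yields false, since the free functions
  // below serialize access through the _Sp_locker mutex pool.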

  /**
   *  @brief  Atomic load for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return @c *__p
   *
   *  The memory order shall not be `memory_order_release` or
   *  `memory_order_acq_rel`.
   *  @{
  */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}
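
  // Example (illustrative only): a reader can take a stable snapshot of a
  // shared_ptr that another thread may re-assign concurrently.  Config is
  // a placeholder type:
  //
  //   std::shared_ptr<Config> global; // re-assigned by an updater thread
  //
  //   void reader()
  //   {
  //     std::shared_ptr<Config> snapshot = std::atomic_load(&global);
  //     // snapshot keeps its object alive even if global is re-assigned.
  //   }
  //
  // The memory_order parameter is accepted for interface compatibility;
  // the lock-based implementation already provides the required ordering.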

  /**
   *  @brief  Atomic store for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r The value to store.
   *
   *  The memory order shall not be `memory_order_acquire` or
   *  `memory_order_acq_rel`.
   *  @{
  */
  template<typename _Tp>
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
			  memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
			  __shared_ptr<_Tp, _Lp> __r,
			  memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}
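
  // Example (illustrative only): publish a new value without racing with
  // concurrent atomic_load calls on the same object (plain assignment to
  // global here would be a data race).  Config is a placeholder type:
  //
  //   std::shared_ptr<Config> global;
  //
  //   void update(Config c)
  //   { std::atomic_store(&global, std::make_shared<Config>(std::move(c))); }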

  /**
   *  @brief  Atomic exchange for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r New value to store in `*__p`.
   *  @return The original value of `*__p`
   *  @{
  */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
			     memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
					   memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
			     __shared_ptr<_Tp, _Lp> __r,
			     memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
					   memory_order_seq_cst);
    }
  /// @}
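
  // Example (illustrative only): atomically take ownership of the current
  // value while installing an empty replacement.  Node is a placeholder
  // type:
  //
  //   std::shared_ptr<Node> head;
  //
  //   std::shared_ptr<Node> take_all()
  //   { return std::atomic_exchange(&head, std::shared_ptr<Node>{}); }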

  /**
   *  @brief  Atomic compare-and-swap for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __v A non-null pointer to a shared_ptr object.
   *  @param  __w The value to store in `*__p` on success.
   *  @return True if `*__p` was equivalent to `*__v`, false otherwise.
   *
   *  The memory order for failure shall not be `memory_order_release` or
   *  `memory_order_acq_rel`.
   *  @{
  */
  template<typename _Tp>
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
					    shared_ptr<_Tp>* __v,
					    shared_ptr<_Tp> __w,
					    memory_order,
					    memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
	{
	  __x = std::move(*__p);
	  *__p = std::move(__w);
	  return true;
	}
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
				   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
					  shared_ptr<_Tp>* __v,
					  shared_ptr<_Tp> __w,
					  memory_order __success,
					  memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
				 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
					    __shared_ptr<_Tp, _Lp>* __v,
					    __shared_ptr<_Tp, _Lp> __w,
					    memory_order,
					    memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
	{
	  __x = std::move(*__p);
	  *__p = std::move(__w);
	  return true;
	}
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
				   __shared_ptr<_Tp, _Lp>* __v,
				   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
					  __shared_ptr<_Tp, _Lp>* __v,
					  __shared_ptr<_Tp, _Lp> __w,
					  memory_order __success,
					  memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
				 __shared_ptr<_Tp, _Lp>* __v,
				 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}
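
  // Example (illustrative only): the usual compare-exchange retry loop,
  // pushing onto a Treiber-style stack of reference-counted nodes:
  //
  //   struct Node { int value; std::shared_ptr<Node> next; };
  //   std::shared_ptr<Node> head;
  //
  //   void push(int v)
  //   {
  //     auto n = std::make_shared<Node>();
  //     n->value = v;
  //     n->next = std::atomic_load(&head);
  //     while (!std::atomic_compare_exchange_weak(&head, &n->next, n))
  //       { } // on failure n->next is updated to the current head; retry
  //   }
  //
  // Equivalence is ownership-based as well as value-based: the stored
  // pointers and the control blocks must both match for success.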

  /// @} group pointer_abstractions

#if __cplusplus >= 202002L
# define __cpp_lib_atomic_shared_ptr 201711L
  template<typename _Tp>
    struct atomic;

  /**
   * @addtogroup pointer_abstractions
   * @relates shared_ptr
   * @{
   */

  template<typename _Up>
    static constexpr bool __is_shared_ptr = false;
  template<typename _Up>
    static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend struct atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
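      // The protocol, in outline (an informal sketch, not additional API):
      //   lock:   spin while the LSB is 1, then CAS __val -> __val | 1
      //   unlock: fetch_sub(1), or exchange to install a new pointer and
      //           release the lock in one step (_M_swap_unlock)
      // _M_ptr may only be read or written while the lock bit is held.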
      struct _Atomic_count
      {
	// Either __shared_count<> or __weak_count<>
	using __count_type = decltype(_Tp::_M_refcount);

	// _Sp_counted_base<>*
	using pointer = decltype(__count_type::_M_pi);

	// Ensure we can use the LSB as the lock bit.
	static_assert(alignof(remove_pointer_t<pointer>) > 1);

	constexpr _Atomic_count() noexcept = default;

	explicit
	_Atomic_count(__count_type&& __c) noexcept
	: _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
	{
	  __c._M_pi = nullptr;
	}

	~_Atomic_count()
	{
	  auto __val = _M_val.load(memory_order_relaxed);
	  _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
	  __glibcxx_assert(!(__val & _S_lock_bit));
	  if (auto __pi = reinterpret_cast<pointer>(__val))
	    {
	      if constexpr (__is_shared_ptr<_Tp>)
		__pi->_M_release();
	      else
		__pi->_M_weak_release();
	    }
	}

	_Atomic_count(const _Atomic_count&) = delete;
	_Atomic_count& operator=(const _Atomic_count&) = delete;

	// Precondition: Caller does not hold lock!
	// Returns the raw pointer value without the lock bit set.
	pointer
	lock(memory_order __o) const noexcept
	{
	  // To acquire the lock we flip the LSB from 0 to 1.

	  auto __current = _M_val.load(memory_order_relaxed);
	  while (__current & _S_lock_bit)
	    {
#if __cpp_lib_atomic_wait
	      __detail::__thread_relax();
#endif
	      __current = _M_val.load(memory_order_relaxed);
	    }

	  _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);

	  while (!_M_val.compare_exchange_strong(__current,
						 __current | _S_lock_bit,
						 __o,
						 memory_order_relaxed))
	    {
	      _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val);
#if __cpp_lib_atomic_wait
	      __detail::__thread_relax();
#endif
	      __current = __current & ~_S_lock_bit;
	      _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
	    }
	  _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val);
	  return reinterpret_cast<pointer>(__current);
	}

	// Precondition: caller holds lock!
	void
	unlock(memory_order __o) const noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
	  _M_val.fetch_sub(1, __o);
	  _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
	}

	// Swaps the values of *this and __c, and unlocks *this.
	// Precondition: caller holds lock!
	void
	_M_swap_unlock(__count_type& __c, memory_order __o) noexcept
	{
	  if (__o != memory_order_seq_cst)
	    __o = memory_order_release;
	  auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
	  _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
	  __x = _M_val.exchange(__x, __o);
	  _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
	  __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
	}

#if __cpp_lib_atomic_wait
	// Precondition: caller holds lock!
	void
	_M_wait_unlock(memory_order __o) const noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
	  auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
	  _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
	  _M_val.wait(__v & ~_S_lock_bit, __o);
	}

	void
	notify_one() noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
	  _M_val.notify_one();
	  _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
	}

	void
	notify_all() noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
	  _M_val.notify_all();
	  _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
	}
#endif

      private:
	mutable __atomic_base<uintptr_t> _M_val{0};
	static constexpr uintptr_t _S_lock_bit{1};
      };

      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
	if (__p)
	  {
	    if constexpr (__is_shared_ptr<_Tp>)
	      __p->_M_add_ref_copy();
	    else
	      __p->_M_weak_add_ref();
	  }
	return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
	__glibcxx_assert(__o != memory_order_release
			   && __o != memory_order_acq_rel);
	// Ensure that the correct value of _M_ptr is visible after locking,
	// by upgrading relaxed or consume to acquire.
	if (__o != memory_order_seq_cst)
	  __o = memory_order_acquire;

	value_type __ret;
	auto __pi = _M_refcount.lock(__o);
	__ret._M_ptr = _M_ptr;
	__ret._M_refcount._M_pi = _S_add_ref(__pi);
	_M_refcount.unlock(memory_order_relaxed);
	return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
	_M_refcount.lock(memory_order_acquire);
	std::swap(_M_ptr, __r._M_ptr);
	_M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

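      // Success below requires "equivalent" values in the C++20
      // atomic<shared_ptr<T>> sense: same stored pointer and same control
      // block, hence both _M_ptr and the _Sp_counted_base pointer are
      // compared.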
      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __o, memory_order __o2) noexcept
      {
	bool __result = true;
	auto __pi = _M_refcount.lock(memory_order_acquire);
	if (_M_ptr == __expected._M_ptr
	      && __pi == __expected._M_refcount._M_pi)
	  {
	    _M_ptr = __desired._M_ptr;
	    _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
	  }
	else
	  {
	    _Tp __sink = std::move(__expected);
	    __expected._M_ptr = _M_ptr;
	    __expected._M_refcount._M_pi = _S_add_ref(__pi);
	    _M_refcount.unlock(__o2);
	    __result = false;
	  }
	return __result;
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
	auto __pi = _M_refcount.lock(memory_order_acquire);
	if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
	  _M_refcount._M_wait_unlock(__o);
	else
	  _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      {
	_M_refcount.notify_one();
      }

      void
      notify_all() noexcept
      {
	_M_refcount.notify_all();
      }
#endif
    };

  template<typename _Tp>
    struct atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
	    memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3893. LWG 3661 broke atomic<shared_ptr<T>> a; a = nullptr;
      void
      operator=(nullptr_t) noexcept
      { store(nullptr); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
	       memory_order __o = memory_order_seq_cst) noexcept
      {
	_M_impl.swap(__desired, __o);
	return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
			      shared_ptr<_Tp> __desired,
			      memory_order __o, memory_order __o2) noexcept
      {
	return _M_impl.compare_exchange_strong(__expected,
					       std::move(__desired), __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __o = memory_order_seq_cst) noexcept
      {
	memory_order __o2;
	switch (__o)
	{
	case memory_order_acq_rel:
	  __o2 = memory_order_acquire;
	  break;
	case memory_order_release:
	  __o2 = memory_order_relaxed;
	  break;
	default:
	  __o2 = __o;
	}
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o, memory_order __o2) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
	   memory_order __o = memory_order_seq_cst) const noexcept
      {
	_M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
	_M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
	_M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };
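
  // Example (illustrative only): with C++20, atomic<shared_ptr<T>> replaces
  // the free functions above (which C++20 deprecates):
  //
  //   std::atomic<std::shared_ptr<int>> ptr;
  //   ptr.store(std::make_shared<int>(42));
  //   std::shared_ptr<int> snapshot = ptr.load();
  //
  //   auto expected = ptr.load();
  //   auto desired = std::make_shared<int>(*expected + 1);
  //   while (!ptr.compare_exchange_weak(expected, desired))
  //     desired = std::make_shared<int>(*expected + 1);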

  template<typename _Tp>
    struct atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
	    memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
	       memory_order __o = memory_order_seq_cst) noexcept
      {
	_M_impl.swap(__desired, __o);
	return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
			      weak_ptr<_Tp> __desired,
			      memory_order __o, memory_order __o2) noexcept
      {
	return _M_impl.compare_exchange_strong(__expected,
					       std::move(__desired), __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __o = memory_order_seq_cst) noexcept
      {
	memory_order __o2;
	switch (__o)
	{
	case memory_order_acq_rel:
	  __o2 = memory_order_acquire;
	  break;
	case memory_order_release:
	  __o2 = memory_order_relaxed;
	  break;
	default:
	  __o2 = __o;
	}
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o, memory_order __o2) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
	   memory_order __o = memory_order_seq_cst) const noexcept
      {
	_M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
	_M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
	_M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
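
  // Example (illustrative only): atomic<weak_ptr<T>> suits a cache slot
  // that observes, but does not own, the current value.  Widget is a
  // placeholder type, and duplicate construction under contention is
  // tolerated here for brevity:
  //
  //   std::atomic<std::weak_ptr<Widget>> cache;
  //
  //   std::shared_ptr<Widget> get()
  //   {
  //     if (auto sp = cache.load().lock())
  //       return sp;
  //     auto sp = std::make_shared<Widget>();
  //     cache.store(sp);
  //     return sp;
  //   }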
  /// @} group pointer_abstractions
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H