xref: /netbsd-src/external/gpl3/gcc/dist/libstdc++-v3/include/std/shared_mutex (revision 0a3071956a3a9fdebdbf7f338cf2d439b45fc728)
14d5abbe8Smrg// <shared_mutex> -*- C++ -*-
24d5abbe8Smrg
3b1e83836Smrg// Copyright (C) 2013-2022 Free Software Foundation, Inc.
44d5abbe8Smrg//
54d5abbe8Smrg// This file is part of the GNU ISO C++ Library.  This library is free
64d5abbe8Smrg// software; you can redistribute it and/or modify it under the
74d5abbe8Smrg// terms of the GNU General Public License as published by the
84d5abbe8Smrg// Free Software Foundation; either version 3, or (at your option)
94d5abbe8Smrg// any later version.
104d5abbe8Smrg
114d5abbe8Smrg// This library is distributed in the hope that it will be useful,
124d5abbe8Smrg// but WITHOUT ANY WARRANTY; without even the implied warranty of
134d5abbe8Smrg// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
144d5abbe8Smrg// GNU General Public License for more details.
154d5abbe8Smrg
164d5abbe8Smrg// Under Section 7 of GPL version 3, you are granted additional
174d5abbe8Smrg// permissions described in the GCC Runtime Library Exception, version
184d5abbe8Smrg// 3.1, as published by the Free Software Foundation.
194d5abbe8Smrg
204d5abbe8Smrg// You should have received a copy of the GNU General Public License and
214d5abbe8Smrg// a copy of the GCC Runtime Library Exception along with this program;
224d5abbe8Smrg// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
234d5abbe8Smrg// <http://www.gnu.org/licenses/>.
244d5abbe8Smrg
254d5abbe8Smrg/** @file include/shared_mutex
264d5abbe8Smrg *  This is a Standard C++ Library header.
274d5abbe8Smrg */
284d5abbe8Smrg
294d5abbe8Smrg#ifndef _GLIBCXX_SHARED_MUTEX
304d5abbe8Smrg#define _GLIBCXX_SHARED_MUTEX 1
314d5abbe8Smrg
324d5abbe8Smrg#pragma GCC system_header
334d5abbe8Smrg
34b17d1066Smrg#if __cplusplus >= 201402L
354d5abbe8Smrg
36b1e83836Smrg#include <bits/chrono.h>
374d5abbe8Smrg#include <bits/functexcept.h>
38b1e83836Smrg#include <bits/move.h>        // move, __exchange
39b1e83836Smrg#include <bits/std_mutex.h>   // defer_lock_t
40b1e83836Smrg
41b1e83836Smrg#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
42b1e83836Smrg# include <condition_variable>
43b1e83836Smrg#endif
444d5abbe8Smrg
454d5abbe8Smrgnamespace std _GLIBCXX_VISIBILITY(default)
464d5abbe8Smrg{
474d5abbe8Smrg_GLIBCXX_BEGIN_NAMESPACE_VERSION
484d5abbe8Smrg
494d5abbe8Smrg  /**
50fb8a8121Smrg   * @addtogroup mutexes
514d5abbe8Smrg   * @{
524d5abbe8Smrg   */
534d5abbe8Smrg
544d5abbe8Smrg#ifdef _GLIBCXX_HAS_GTHREADS
554d5abbe8Smrg
56b17d1066Smrg#if __cplusplus >= 201703L
57b1e83836Smrg#define __cpp_lib_shared_mutex 201505L
58f9a78e0eSmrg  class shared_mutex;
59f9a78e0eSmrg#endif
604d5abbe8Smrg
61b1e83836Smrg#define __cpp_lib_shared_timed_mutex 201402L
62f9a78e0eSmrg  class shared_timed_mutex;
63f9a78e0eSmrg
64fb8a8121Smrg  /// @cond undocumented
65fb8a8121Smrg
66f9a78e0eSmrg#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
// __gthrw is available: create weak references to the pthread_rwlock
// functions so that single-threaded programs need not link against the
// thread library.  Each __glibcxx_rwlock_* wrapper forwards to the real
// pthread function only when threads are actually active in the process
// (__gthread_active_p) and otherwise succeeds trivially with 0.
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  // No static initializer macro is available, so the rwlock has to be set
  // up and torn down explicitly; wrap init/destroy as well.  (init takes
  // an extra attributes argument, so it cannot use _GLIBCXX_GTHRW.)
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  // Timed lock operations, used by shared_timed_mutex.  These take an
  // absolute timespec, so they cannot use _GLIBCXX_GTHRW either.
   __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
   __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
// __gthrw is not available: call the pthread functions directly under the
// same __glibcxx_rwlock_* names used by the code below.
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif
150181254a7Smrg
  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    // The rwlock can be statically initialized, so default construction
    // and destruction are trivial.
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    // No static initializer: initialize and destroy the rwlock explicitly,
    // mapping pthread error codes onto the standard exceptions.
    pthread_rwlock_t	_M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    // Exclusive ownership

    // Block until the write lock can be acquired.
    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Try to acquire the write lock without blocking.
    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    // Release the write lock.  Precondition: this thread holds it.
    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    // POSIX uses a single pthread_rwlock_unlock call for both read and
    // write locks, so releasing a shared lock just forwards to unlock().
    void
    unlock_shared()
    {
      unlock();
    }

    // Expose the underlying pthread_rwlock_t for native_handle().
    void* native_handle() { return &_M_rwlock; }
  };
258f9a78e0eSmrg#endif
2594d5abbe8Smrg
260f9a78e0eSmrg#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
261f9a78e0eSmrg  /// A shared mutex type implemented using std::condition_variable.
262f9a78e0eSmrg  class __shared_mutex_cv
263f9a78e0eSmrg  {
264f9a78e0eSmrg    friend class shared_timed_mutex;
2654d5abbe8Smrg
2664d5abbe8Smrg    // Based on Howard Hinnant's reference implementation from N2406.
2674d5abbe8Smrg
2684d5abbe8Smrg    // The high bit of _M_state is the write-entered flag which is set to
2694d5abbe8Smrg    // indicate a writer has taken the lock or is queuing to take the lock.
2704d5abbe8Smrg    // The remaining bits are the count of reader locks.
2714d5abbe8Smrg    //
2724d5abbe8Smrg    // To take a reader lock, block on gate1 while the write-entered flag is
2734d5abbe8Smrg    // set or the maximum number of reader locks is held, then increment the
2744d5abbe8Smrg    // reader lock count.
2754d5abbe8Smrg    // To release, decrement the count, then if the write-entered flag is set
2764d5abbe8Smrg    // and the count is zero then signal gate2 to wake a queued writer,
2774d5abbe8Smrg    // otherwise if the maximum number of reader locks was held signal gate1
2784d5abbe8Smrg    // to wake a reader.
2794d5abbe8Smrg    //
2804d5abbe8Smrg    // To take a writer lock, block on gate1 while the write-entered flag is
2814d5abbe8Smrg    // set, then set the write-entered flag to start queueing, then block on
2824d5abbe8Smrg    // gate2 while the number of reader locks is non-zero.
2834d5abbe8Smrg    // To release, unset the write-entered flag and signal gate1 to wake all
2844d5abbe8Smrg    // blocked readers and writers.
2854d5abbe8Smrg    //
2864d5abbe8Smrg    // This means that when no reader locks are held readers and writers get
2874d5abbe8Smrg    // equal priority. When one or more reader locks is held a writer gets
2884d5abbe8Smrg    // priority and no more reader locks can be taken while the writer is
2894d5abbe8Smrg    // queued.
2904d5abbe8Smrg
2914d5abbe8Smrg    // Only locked when accessing _M_state or waiting on condition variables.
2924d5abbe8Smrg    mutex		_M_mut;
2934d5abbe8Smrg    // Used to block while write-entered is set or reader count at maximum.
2944d5abbe8Smrg    condition_variable	_M_gate1;
2954d5abbe8Smrg    // Used to block queued writers while reader count is non-zero.
2964d5abbe8Smrg    condition_variable	_M_gate2;
2974d5abbe8Smrg    // The write-entered flag and reader count.
2984d5abbe8Smrg    unsigned		_M_state;
2994d5abbe8Smrg
3004d5abbe8Smrg    static constexpr unsigned _S_write_entered
3014d5abbe8Smrg      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
3024d5abbe8Smrg    static constexpr unsigned _S_max_readers = ~_S_write_entered;
3034d5abbe8Smrg
3044d5abbe8Smrg    // Test whether the write-entered flag is set. _M_mut must be locked.
3054d5abbe8Smrg    bool _M_write_entered() const { return _M_state & _S_write_entered; }
3064d5abbe8Smrg
3074d5abbe8Smrg    // The number of reader locks currently held. _M_mut must be locked.
3084d5abbe8Smrg    unsigned _M_readers() const { return _M_state & _S_max_readers; }
3094d5abbe8Smrg
3104d5abbe8Smrg  public:
311f9a78e0eSmrg    __shared_mutex_cv() : _M_state(0) {}
3124d5abbe8Smrg
313f9a78e0eSmrg    ~__shared_mutex_cv()
3144d5abbe8Smrg    {
315f9a78e0eSmrg      __glibcxx_assert( _M_state == 0 );
3164d5abbe8Smrg    }
3174d5abbe8Smrg
318f9a78e0eSmrg    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
319f9a78e0eSmrg    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
3204d5abbe8Smrg
3214d5abbe8Smrg    // Exclusive ownership
3224d5abbe8Smrg
3234d5abbe8Smrg    void
3244d5abbe8Smrg    lock()
3254d5abbe8Smrg    {
3264d5abbe8Smrg      unique_lock<mutex> __lk(_M_mut);
3274d5abbe8Smrg      // Wait until we can set the write-entered flag.
3284d5abbe8Smrg      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
3294d5abbe8Smrg      _M_state |= _S_write_entered;
3304d5abbe8Smrg      // Then wait until there are no more readers.
3314d5abbe8Smrg      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
3324d5abbe8Smrg    }
3334d5abbe8Smrg
3344d5abbe8Smrg    bool
3354d5abbe8Smrg    try_lock()
3364d5abbe8Smrg    {
3374d5abbe8Smrg      unique_lock<mutex> __lk(_M_mut, try_to_lock);
3384d5abbe8Smrg      if (__lk.owns_lock() && _M_state == 0)
3394d5abbe8Smrg	{
3404d5abbe8Smrg	  _M_state = _S_write_entered;
3414d5abbe8Smrg	  return true;
3424d5abbe8Smrg	}
3434d5abbe8Smrg      return false;
3444d5abbe8Smrg    }
3454d5abbe8Smrg
3464d5abbe8Smrg    void
3474d5abbe8Smrg    unlock()
3484d5abbe8Smrg    {
3494d5abbe8Smrg      lock_guard<mutex> __lk(_M_mut);
350f9a78e0eSmrg      __glibcxx_assert( _M_write_entered() );
3514d5abbe8Smrg      _M_state = 0;
3524d5abbe8Smrg      // call notify_all() while mutex is held so that another thread can't
3534d5abbe8Smrg      // lock and unlock the mutex then destroy *this before we make the call.
3544d5abbe8Smrg      _M_gate1.notify_all();
3554d5abbe8Smrg    }
3564d5abbe8Smrg
3574d5abbe8Smrg    // Shared ownership
3584d5abbe8Smrg
3594d5abbe8Smrg    void
3604d5abbe8Smrg    lock_shared()
3614d5abbe8Smrg    {
3624d5abbe8Smrg      unique_lock<mutex> __lk(_M_mut);
3634d5abbe8Smrg      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
3644d5abbe8Smrg      ++_M_state;
3654d5abbe8Smrg    }
3664d5abbe8Smrg
3674d5abbe8Smrg    bool
3684d5abbe8Smrg    try_lock_shared()
3694d5abbe8Smrg    {
3704d5abbe8Smrg      unique_lock<mutex> __lk(_M_mut, try_to_lock);
3714d5abbe8Smrg      if (!__lk.owns_lock())
3724d5abbe8Smrg	return false;
3734d5abbe8Smrg      if (_M_state < _S_max_readers)
3744d5abbe8Smrg	{
3754d5abbe8Smrg	  ++_M_state;
3764d5abbe8Smrg	  return true;
3774d5abbe8Smrg	}
3784d5abbe8Smrg      return false;
3794d5abbe8Smrg    }
3804d5abbe8Smrg
3814d5abbe8Smrg    void
3824d5abbe8Smrg    unlock_shared()
3834d5abbe8Smrg    {
3844d5abbe8Smrg      lock_guard<mutex> __lk(_M_mut);
385f9a78e0eSmrg      __glibcxx_assert( _M_readers() > 0 );
3864d5abbe8Smrg      auto __prev = _M_state--;
3874d5abbe8Smrg      if (_M_write_entered())
3884d5abbe8Smrg	{
3894d5abbe8Smrg	  // Wake the queued writer if there are no more readers.
3904d5abbe8Smrg	  if (_M_readers() == 0)
3914d5abbe8Smrg	    _M_gate2.notify_one();
3924d5abbe8Smrg	  // No need to notify gate1 because we give priority to the queued
3934d5abbe8Smrg	  // writer, and that writer will eventually notify gate1 after it
3944d5abbe8Smrg	  // clears the write-entered flag.
3954d5abbe8Smrg	}
3964d5abbe8Smrg      else
3974d5abbe8Smrg	{
3984d5abbe8Smrg	  // Wake any thread that was blocked on reader overflow.
3994d5abbe8Smrg	  if (__prev == _S_max_readers)
4004d5abbe8Smrg	    _M_gate1.notify_one();
4014d5abbe8Smrg	}
4024d5abbe8Smrg    }
403f9a78e0eSmrg  };
404f9a78e0eSmrg#endif
405fb8a8121Smrg  /// @endcond
406f9a78e0eSmrg
407b1e83836Smrg#if __cplusplus >= 201703L
  /// The standard shared mutex type.
  // A thin wrapper that delegates every operation to the platform-selected
  // implementation (__shared_mutex_pthread or __shared_mutex_cv).
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    // native_handle() is only provided for the pthread-based implementation;
    // it returns a pointer to the underlying pthread_rwlock_t.
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
441f9a78e0eSmrg#endif // C++17
442f9a78e0eSmrg
443fb8a8121Smrg  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  // Both pthread_rwlock_t and timed locking are available, so the
  // pthread-based implementation can back the timed operations too.
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  // Otherwise fall back to the mutex + condition_variable implementation.
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
449fb8a8121Smrg  /// @endcond
450f9a78e0eSmrg
  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    // pthread_rwlock_clock{rd,wr}lock is available, so relative timeouts
    // can be anchored to the steady clock (immune to wall-clock changes).
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    // Try to acquire the write lock, giving up once __rtime has elapsed.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	// duration_cast truncates; if __clock_t ticks more coarsely than
	// _Period, round up so we never wait less than __rtime.
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    // Try to acquire a read lock, giving up once __rtime has elapsed.
    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	// Round up as in try_lock_for, so we never wait less than __rtime.
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_shared_until(__clock_t::now() + __rt);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    // system_clock overload: convert the absolute time to a timespec and
    // use pthread_rwlock_timedwrlock, which waits against the realtime
    // clock.
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
		     _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    // steady_clock overload: wait against CLOCK_MONOTONIC so the timeout
    // is unaffected by system clock adjustments.
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
		   _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    // Overload for an arbitrary (user-supplied) clock.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	    auto __rtime = __atime - __now;
	    if (try_lock_for(__rtime))
	      return true;
	    __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

    // Shared ownership

    // system_clock overload; see try_lock_until above for the conversion.
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    // steady_clock overload using CLOCK_MONOTONIC, as for try_lock_until.
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    // Overload for an arbitrary (user-supplied) clock.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	    auto __rtime = __atime - __now;
	    if (try_lock_shared_for(__rtime))
	      return true;
	    __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Fallback implementations built on the __shared_mutex_cv base, using
    // its gates/state members directly (cf. __shared_mutex_cv::lock()).

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    // Timed out waiting for readers to drain: roll back the
	    // write-entered flag so other threads can make progress.
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
7114d5abbe8Smrg#endif // _GLIBCXX_HAS_GTHREADS
7124d5abbe8Smrg
7134d5abbe8Smrg  /// shared_lock
7144d5abbe8Smrg  template<typename _Mutex>
7154d5abbe8Smrg    class shared_lock
7164d5abbe8Smrg    {
7174d5abbe8Smrg    public:
7184d5abbe8Smrg      typedef _Mutex mutex_type;
7194d5abbe8Smrg
7204d5abbe8Smrg      // Shared locking
7214d5abbe8Smrg
7224d5abbe8Smrg      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
7234d5abbe8Smrg
7244d5abbe8Smrg      explicit
725b17d1066Smrg      shared_lock(mutex_type& __m)
726b17d1066Smrg      : _M_pm(std::__addressof(__m)), _M_owns(true)
7274d5abbe8Smrg      { __m.lock_shared(); }
7284d5abbe8Smrg
7294d5abbe8Smrg      shared_lock(mutex_type& __m, defer_lock_t) noexcept
730b17d1066Smrg      : _M_pm(std::__addressof(__m)), _M_owns(false) { }
7314d5abbe8Smrg
7324d5abbe8Smrg      shared_lock(mutex_type& __m, try_to_lock_t)
733b17d1066Smrg      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
7344d5abbe8Smrg
7354d5abbe8Smrg      shared_lock(mutex_type& __m, adopt_lock_t)
736b17d1066Smrg      : _M_pm(std::__addressof(__m)), _M_owns(true) { }
7374d5abbe8Smrg
7384d5abbe8Smrg      template<typename _Clock, typename _Duration>
7394d5abbe8Smrg	shared_lock(mutex_type& __m,
7404d5abbe8Smrg		    const chrono::time_point<_Clock, _Duration>& __abs_time)
741b17d1066Smrg      : _M_pm(std::__addressof(__m)),
742b17d1066Smrg	_M_owns(__m.try_lock_shared_until(__abs_time)) { }
7434d5abbe8Smrg
7444d5abbe8Smrg      template<typename _Rep, typename _Period>
7454d5abbe8Smrg	shared_lock(mutex_type& __m,
7464d5abbe8Smrg		    const chrono::duration<_Rep, _Period>& __rel_time)
747b17d1066Smrg      : _M_pm(std::__addressof(__m)),
748b17d1066Smrg	_M_owns(__m.try_lock_shared_for(__rel_time)) { }
7494d5abbe8Smrg
7504d5abbe8Smrg      ~shared_lock()
7514d5abbe8Smrg      {
7524d5abbe8Smrg	if (_M_owns)
7534d5abbe8Smrg	  _M_pm->unlock_shared();
7544d5abbe8Smrg      }
7554d5abbe8Smrg
7564d5abbe8Smrg      shared_lock(shared_lock const&) = delete;
7574d5abbe8Smrg      shared_lock& operator=(shared_lock const&) = delete;
7584d5abbe8Smrg
7594d5abbe8Smrg      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
7604d5abbe8Smrg      { swap(__sl); }
7614d5abbe8Smrg
7624d5abbe8Smrg      shared_lock&
7634d5abbe8Smrg      operator=(shared_lock&& __sl) noexcept
7644d5abbe8Smrg      {
7654d5abbe8Smrg	shared_lock(std::move(__sl)).swap(*this);
7664d5abbe8Smrg	return *this;
7674d5abbe8Smrg      }
7684d5abbe8Smrg
7694d5abbe8Smrg      void
7704d5abbe8Smrg      lock()
7714d5abbe8Smrg      {
7724d5abbe8Smrg	_M_lockable();
7734d5abbe8Smrg	_M_pm->lock_shared();
7744d5abbe8Smrg	_M_owns = true;
7754d5abbe8Smrg      }
7764d5abbe8Smrg
7774d5abbe8Smrg      bool
7784d5abbe8Smrg      try_lock()
7794d5abbe8Smrg      {
7804d5abbe8Smrg	_M_lockable();
7814d5abbe8Smrg	return _M_owns = _M_pm->try_lock_shared();
7824d5abbe8Smrg      }
7834d5abbe8Smrg
7844d5abbe8Smrg      template<typename _Rep, typename _Period>
7854d5abbe8Smrg	bool
7864d5abbe8Smrg	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
7874d5abbe8Smrg	{
7884d5abbe8Smrg	  _M_lockable();
7894d5abbe8Smrg	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
7904d5abbe8Smrg	}
7914d5abbe8Smrg
7924d5abbe8Smrg      template<typename _Clock, typename _Duration>
7934d5abbe8Smrg	bool
7944d5abbe8Smrg	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
7954d5abbe8Smrg	{
7964d5abbe8Smrg	  _M_lockable();
7974d5abbe8Smrg	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
7984d5abbe8Smrg	}
7994d5abbe8Smrg
8004d5abbe8Smrg      void
8014d5abbe8Smrg      unlock()
8024d5abbe8Smrg      {
8034d5abbe8Smrg	if (!_M_owns)
804*0a307195Smrg	  __throw_system_error(int(errc::operation_not_permitted));
8054d5abbe8Smrg	_M_pm->unlock_shared();
8064d5abbe8Smrg	_M_owns = false;
8074d5abbe8Smrg      }
8084d5abbe8Smrg
8094d5abbe8Smrg      // Setters
8104d5abbe8Smrg
8114d5abbe8Smrg      void
8124d5abbe8Smrg      swap(shared_lock& __u) noexcept
8134d5abbe8Smrg      {
8144d5abbe8Smrg	std::swap(_M_pm, __u._M_pm);
8154d5abbe8Smrg	std::swap(_M_owns, __u._M_owns);
8164d5abbe8Smrg      }
8174d5abbe8Smrg
8184d5abbe8Smrg      mutex_type*
8194d5abbe8Smrg      release() noexcept
8204d5abbe8Smrg      {
8214d5abbe8Smrg	_M_owns = false;
822b1e83836Smrg	return std::__exchange(_M_pm, nullptr);
8234d5abbe8Smrg      }
8244d5abbe8Smrg
8254d5abbe8Smrg      // Getters
8264d5abbe8Smrg
8274d5abbe8Smrg      bool owns_lock() const noexcept { return _M_owns; }
8284d5abbe8Smrg
8294d5abbe8Smrg      explicit operator bool() const noexcept { return _M_owns; }
8304d5abbe8Smrg
8314d5abbe8Smrg      mutex_type* mutex() const noexcept { return _M_pm; }
8324d5abbe8Smrg
8334d5abbe8Smrg    private:
8344d5abbe8Smrg      void
8354d5abbe8Smrg      _M_lockable() const
8364d5abbe8Smrg      {
8374d5abbe8Smrg	if (_M_pm == nullptr)
8384d5abbe8Smrg	  __throw_system_error(int(errc::operation_not_permitted));
8394d5abbe8Smrg	if (_M_owns)
8404d5abbe8Smrg	  __throw_system_error(int(errc::resource_deadlock_would_occur));
8414d5abbe8Smrg      }
8424d5abbe8Smrg
8434d5abbe8Smrg      mutex_type*	_M_pm;
8444d5abbe8Smrg      bool		_M_owns;
8454d5abbe8Smrg    };
8464d5abbe8Smrg
8474d5abbe8Smrg  /// Swap specialization for shared_lock
848fb8a8121Smrg  /// @relates shared_mutex
8494d5abbe8Smrg  template<typename _Mutex>
8504d5abbe8Smrg    void
8514d5abbe8Smrg    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
8524d5abbe8Smrg    { __x.swap(__y); }
8534d5abbe8Smrg
854a448f87cSmrg  /// @} group mutexes
8554d5abbe8Smrg_GLIBCXX_END_NAMESPACE_VERSION
8564d5abbe8Smrg} // namespace
8574d5abbe8Smrg
8584d5abbe8Smrg#endif // C++14
8594d5abbe8Smrg
8604d5abbe8Smrg#endif // _GLIBCXX_SHARED_MUTEX
861