// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

  /// @cond undocumented

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
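
  // For example, _GLIBCXX_GTHRW(rwlock_rdlock) below expands to a weak
  // reference to pthread_rwlock_rdlock plus this inline forwarder, so the
  // pthreads call is skipped entirely in a program that never starts a
  // thread:
  //
  //   __gthrw(pthread_rwlock_rdlock);
  //   static inline int
  //   __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  //   {
  //     if (__gthread_active_p ())
  //       return __gthrw_(pthread_rwlock_rdlock) (__rwlock);
  //     else
  //       return 0;
  //   }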
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif

  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority.  When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
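
    // A concrete illustration of the encoding (assuming 32-bit unsigned):
    // _S_write_entered is 0x80000000 and _S_max_readers is 0x7fffffff, so
    // a state word of 0x80000003 means a writer is queued behind three
    // outstanding reader locks:
    //
    //   unsigned state = 0x80000003;
    //   bool writer = state & _S_write_entered;    // true
    //   unsigned readers = state & _S_max_readers; // 3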

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
#endif
  /// @endcond

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
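
  // A minimal usage sketch for shared_mutex (illustrative only; the names
  // below are hypothetical, not part of this header).  Any number of
  // readers may hold the lock concurrently, while a writer excludes both
  // readers and other writers:
  //
  //   std::shared_mutex mtx;
  //   int value = 0;
  //
  //   int read_value()                    // may run concurrently
  //   {
  //     std::shared_lock<std::shared_mutex> lk(mtx); // calls lock_shared()
  //     return value;
  //   }
  //
  //   void write_value(int v)             // exclusive ownership
  //   {
  //     std::lock_guard<std::shared_mutex> lk(mtx);  // calls lock()
  //     value = v;
  //   }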

  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond

  /// The standard shared timed mutex type.
  class shared_timed_mutex
    : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_until(__clock_t::now() + __rt);
      }
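
      // A worked example of the rounding above (illustrative): if __clock_t
      // ticks in microseconds and __rtime is 1500 nanoseconds, duration_cast
      // truncates to 1us; ratio_greater<micro, nano> is true, so ++__rt
      // rounds up to 2us, guaranteeing we never report a timeout before the
      // full requested interval has elapsed:
      //
      //   auto __rt = chrono::duration_cast<chrono::microseconds>(
      //                 chrono::nanoseconds(1500));   // 1us, truncated
      //   ++__rt;                                     // 2us, rounded up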

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_shared_until(__clock_t::now() + __rt);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
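
      // A worked example of the timespec conversion above (illustrative):
      // for an __atime 2.5 seconds after the system_clock epoch,
      // time_point_cast yields __s == 2s and the remainder __ns ==
      // 500000000ns, so the absolute timeout passed to the pthreads call
      // is __gthread_time_t{ 2, 500000000 }.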

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
        static_assert(chrono::is_clock_v<_Clock>);
#endif
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __atime)
      {
#if __cplusplus > 201703L
        static_assert(chrono::is_clock_v<_Clock>);
#endif
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_shared_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
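
  // A minimal timed-locking sketch for shared_timed_mutex (illustrative
  // only; try_read is a hypothetical name): a reader that gives up if
  // shared ownership cannot be obtained within 10 milliseconds:
  //
  //   std::shared_timed_mutex tm;
  //   int value = 0;
  //
  //   bool try_read(int& out)
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(tm, std::defer_lock);
  //     if (!lk.try_lock_for(std::chrono::milliseconds(10)))
  //       return false;                 // timed out, lock not acquired
  //     out = value;
  //     return true;                    // ~shared_lock releases the lock
  //   }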
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool        _M_owns;
    };
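
  // A minimal sketch of shared_lock's ownership semantics (illustrative
  // only; take and use are hypothetical names): the lock is movable but
  // not copyable, and owns_lock() reports whether this handle currently
  // holds shared ownership:
  //
  //   std::shared_timed_mutex m;
  //
  //   std::shared_lock<std::shared_timed_mutex> take()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(m); // lock_shared()
  //     return lk;                      // ownership moves to the caller
  //   }
  //
  //   void use()
  //   {
  //     auto lk = take();
  //     if (lk.owns_lock())
  //       { /* read the shared data */ }
  //   }                                 // ~shared_lock -> unlock_shared()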

  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX