xref: /dflybsd-src/contrib/gcc-8.0/libstdc++-v3/include/std/shared_mutex (revision 38fd149817dfbff97799f62fcb70be98c4e32523)
1*38fd1498Szrj// <shared_mutex> -*- C++ -*-
2*38fd1498Szrj
3*38fd1498Szrj// Copyright (C) 2013-2018 Free Software Foundation, Inc.
4*38fd1498Szrj//
5*38fd1498Szrj// This file is part of the GNU ISO C++ Library.  This library is free
6*38fd1498Szrj// software; you can redistribute it and/or modify it under the
7*38fd1498Szrj// terms of the GNU General Public License as published by the
8*38fd1498Szrj// Free Software Foundation; either version 3, or (at your option)
9*38fd1498Szrj// any later version.
10*38fd1498Szrj
11*38fd1498Szrj// This library is distributed in the hope that it will be useful,
12*38fd1498Szrj// but WITHOUT ANY WARRANTY; without even the implied warranty of
13*38fd1498Szrj// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14*38fd1498Szrj// GNU General Public License for more details.
15*38fd1498Szrj
16*38fd1498Szrj// Under Section 7 of GPL version 3, you are granted additional
17*38fd1498Szrj// permissions described in the GCC Runtime Library Exception, version
18*38fd1498Szrj// 3.1, as published by the Free Software Foundation.
19*38fd1498Szrj
20*38fd1498Szrj// You should have received a copy of the GNU General Public License and
21*38fd1498Szrj// a copy of the GCC Runtime Library Exception along with this program;
22*38fd1498Szrj// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23*38fd1498Szrj// <http://www.gnu.org/licenses/>.
24*38fd1498Szrj
25*38fd1498Szrj/** @file include/shared_mutex
26*38fd1498Szrj *  This is a Standard C++ Library header.
27*38fd1498Szrj */
28*38fd1498Szrj
29*38fd1498Szrj#ifndef _GLIBCXX_SHARED_MUTEX
30*38fd1498Szrj#define _GLIBCXX_SHARED_MUTEX 1
31*38fd1498Szrj
32*38fd1498Szrj#pragma GCC system_header
33*38fd1498Szrj
34*38fd1498Szrj#if __cplusplus >= 201402L
35*38fd1498Szrj
36*38fd1498Szrj#include <bits/c++config.h>
37*38fd1498Szrj#include <condition_variable>
38*38fd1498Szrj#include <bits/functexcept.h>
39*38fd1498Szrj
40*38fd1498Szrjnamespace std _GLIBCXX_VISIBILITY(default)
41*38fd1498Szrj{
42*38fd1498Szrj_GLIBCXX_BEGIN_NAMESPACE_VERSION
43*38fd1498Szrj
44*38fd1498Szrj  /**
45*38fd1498Szrj   * @ingroup mutexes
46*38fd1498Szrj   * @{
47*38fd1498Szrj   */
48*38fd1498Szrj
49*38fd1498Szrj#ifdef _GLIBCXX_USE_C99_STDINT_TR1
50*38fd1498Szrj#ifdef _GLIBCXX_HAS_GTHREADS
51*38fd1498Szrj
52*38fd1498Szrj#if __cplusplus >= 201703L
53*38fd1498Szrj#define __cpp_lib_shared_mutex 201505
54*38fd1498Szrj  class shared_mutex;
55*38fd1498Szrj#endif
56*38fd1498Szrj
57*38fd1498Szrj#define __cpp_lib_shared_timed_mutex 201402
58*38fd1498Szrj  class shared_timed_mutex;
59*38fd1498Szrj
60*38fd1498Szrj#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    // The platform supports static initialization of rwlocks, so the
    // defaulted constructor and destructor are sufficient.
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    // Initialize the rwlock at run time, mapping pthread failure codes
    // onto the exceptions required of a mutex constructor.
    __shared_mutex_pthread()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    // Exclusive ownership

    // Block until the write lock is acquired.
    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Try to acquire the write lock without blocking.
    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      // pthread_rwlock_unlock releases either kind of lock, so we can
      // reuse unlock() here.
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
168*38fd1498Szrj#endif
169*38fd1498Szrj
170*38fd1498Szrj#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex		_M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable	_M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned		_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      // Destruction while any lock is held is a precondition violation.
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      // try_to_lock so that contention on the internal mutex also results
      // in an immediate failure rather than blocking.
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      // The predicate is false both while a writer is active/queued (the
      // high bit makes _M_state >= _S_max_readers) and on reader overflow.
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
314*38fd1498Szrj#endif
315*38fd1498Szrj
316*38fd1498Szrj#if __cplusplus > 201402L
317*38fd1498Szrj  /// The standard shared mutex type.
318*38fd1498Szrj  class shared_mutex
319*38fd1498Szrj  {
320*38fd1498Szrj  public:
321*38fd1498Szrj    shared_mutex() = default;
322*38fd1498Szrj    ~shared_mutex() = default;
323*38fd1498Szrj
324*38fd1498Szrj    shared_mutex(const shared_mutex&) = delete;
325*38fd1498Szrj    shared_mutex& operator=(const shared_mutex&) = delete;
326*38fd1498Szrj
327*38fd1498Szrj    // Exclusive ownership
328*38fd1498Szrj
329*38fd1498Szrj    void lock() { _M_impl.lock(); }
330*38fd1498Szrj    bool try_lock() { return _M_impl.try_lock(); }
331*38fd1498Szrj    void unlock() { _M_impl.unlock(); }
332*38fd1498Szrj
333*38fd1498Szrj    // Shared ownership
334*38fd1498Szrj
335*38fd1498Szrj    void lock_shared() { _M_impl.lock_shared(); }
336*38fd1498Szrj    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
337*38fd1498Szrj    void unlock_shared() { _M_impl.unlock_shared(); }
338*38fd1498Szrj
339*38fd1498Szrj#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
340*38fd1498Szrj    typedef void* native_handle_type;
341*38fd1498Szrj    native_handle_type native_handle() { return _M_impl.native_handle(); }
342*38fd1498Szrj
343*38fd1498Szrj  private:
344*38fd1498Szrj    __shared_mutex_pthread _M_impl;
345*38fd1498Szrj#else
346*38fd1498Szrj  private:
347*38fd1498Szrj    __shared_mutex_cv _M_impl;
348*38fd1498Szrj#endif
349*38fd1498Szrj  };
350*38fd1498Szrj#endif // C++17
351*38fd1498Szrj
352*38fd1498Szrj#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
353*38fd1498Szrj  using __shared_timed_mutex_base = __shared_mutex_pthread;
354*38fd1498Szrj#else
355*38fd1498Szrj  using __shared_timed_mutex_base = __shared_mutex_cv;
356*38fd1498Szrj#endif
357*38fd1498Szrj
  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    // NOTE(review): system_clock is not a steady clock, so the timed waits
    // below can be perturbed by wall-clock adjustments.
    typedef chrono::system_clock	__clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    // Convert the relative timeout to an absolute time point on __clock_t.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    // Convert the relative timeout to an absolute time point on __clock_t.
    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	// Split the absolute time into the seconds/nanoseconds form the
	// pthread timed-lock functions expect.
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	// Split the absolute time into the seconds/nanoseconds form the
	// pthread timed-lock functions expect.
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    // Timed out waiting for readers to drain: clear the
	    // write-entered flag we set above before giving up.
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
537*38fd1498Szrj#endif // _GLIBCXX_HAS_GTHREADS
538*38fd1498Szrj
539*38fd1498Szrj  /// shared_lock
540*38fd1498Szrj  template<typename _Mutex>
541*38fd1498Szrj    class shared_lock
542*38fd1498Szrj    {
543*38fd1498Szrj    public:
544*38fd1498Szrj      typedef _Mutex mutex_type;
545*38fd1498Szrj
546*38fd1498Szrj      // Shared locking
547*38fd1498Szrj
548*38fd1498Szrj      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
549*38fd1498Szrj
550*38fd1498Szrj      explicit
551*38fd1498Szrj      shared_lock(mutex_type& __m)
552*38fd1498Szrj      : _M_pm(std::__addressof(__m)), _M_owns(true)
553*38fd1498Szrj      { __m.lock_shared(); }
554*38fd1498Szrj
555*38fd1498Szrj      shared_lock(mutex_type& __m, defer_lock_t) noexcept
556*38fd1498Szrj      : _M_pm(std::__addressof(__m)), _M_owns(false) { }
557*38fd1498Szrj
558*38fd1498Szrj      shared_lock(mutex_type& __m, try_to_lock_t)
559*38fd1498Szrj      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
560*38fd1498Szrj
561*38fd1498Szrj      shared_lock(mutex_type& __m, adopt_lock_t)
562*38fd1498Szrj      : _M_pm(std::__addressof(__m)), _M_owns(true) { }
563*38fd1498Szrj
564*38fd1498Szrj      template<typename _Clock, typename _Duration>
565*38fd1498Szrj	shared_lock(mutex_type& __m,
566*38fd1498Szrj		    const chrono::time_point<_Clock, _Duration>& __abs_time)
567*38fd1498Szrj      : _M_pm(std::__addressof(__m)),
568*38fd1498Szrj	_M_owns(__m.try_lock_shared_until(__abs_time)) { }
569*38fd1498Szrj
570*38fd1498Szrj      template<typename _Rep, typename _Period>
571*38fd1498Szrj	shared_lock(mutex_type& __m,
572*38fd1498Szrj		    const chrono::duration<_Rep, _Period>& __rel_time)
573*38fd1498Szrj      : _M_pm(std::__addressof(__m)),
574*38fd1498Szrj	_M_owns(__m.try_lock_shared_for(__rel_time)) { }
575*38fd1498Szrj
576*38fd1498Szrj      ~shared_lock()
577*38fd1498Szrj      {
578*38fd1498Szrj	if (_M_owns)
579*38fd1498Szrj	  _M_pm->unlock_shared();
580*38fd1498Szrj      }
581*38fd1498Szrj
582*38fd1498Szrj      shared_lock(shared_lock const&) = delete;
583*38fd1498Szrj      shared_lock& operator=(shared_lock const&) = delete;
584*38fd1498Szrj
585*38fd1498Szrj      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
586*38fd1498Szrj      { swap(__sl); }
587*38fd1498Szrj
588*38fd1498Szrj      shared_lock&
589*38fd1498Szrj      operator=(shared_lock&& __sl) noexcept
590*38fd1498Szrj      {
591*38fd1498Szrj	shared_lock(std::move(__sl)).swap(*this);
592*38fd1498Szrj	return *this;
593*38fd1498Szrj      }
594*38fd1498Szrj
595*38fd1498Szrj      void
596*38fd1498Szrj      lock()
597*38fd1498Szrj      {
598*38fd1498Szrj	_M_lockable();
599*38fd1498Szrj	_M_pm->lock_shared();
600*38fd1498Szrj	_M_owns = true;
601*38fd1498Szrj      }
602*38fd1498Szrj
603*38fd1498Szrj      bool
604*38fd1498Szrj      try_lock()
605*38fd1498Szrj      {
606*38fd1498Szrj	_M_lockable();
607*38fd1498Szrj	return _M_owns = _M_pm->try_lock_shared();
608*38fd1498Szrj      }
609*38fd1498Szrj
610*38fd1498Szrj      template<typename _Rep, typename _Period>
611*38fd1498Szrj	bool
612*38fd1498Szrj	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
613*38fd1498Szrj	{
614*38fd1498Szrj	  _M_lockable();
615*38fd1498Szrj	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
616*38fd1498Szrj	}
617*38fd1498Szrj
618*38fd1498Szrj      template<typename _Clock, typename _Duration>
619*38fd1498Szrj	bool
620*38fd1498Szrj	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
621*38fd1498Szrj	{
622*38fd1498Szrj	  _M_lockable();
623*38fd1498Szrj	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
624*38fd1498Szrj	}
625*38fd1498Szrj
626*38fd1498Szrj      void
627*38fd1498Szrj      unlock()
628*38fd1498Szrj      {
629*38fd1498Szrj	if (!_M_owns)
630*38fd1498Szrj	  __throw_system_error(int(errc::resource_deadlock_would_occur));
631*38fd1498Szrj	_M_pm->unlock_shared();
632*38fd1498Szrj	_M_owns = false;
633*38fd1498Szrj      }
634*38fd1498Szrj
635*38fd1498Szrj      // Setters
636*38fd1498Szrj
637*38fd1498Szrj      void
638*38fd1498Szrj      swap(shared_lock& __u) noexcept
639*38fd1498Szrj      {
640*38fd1498Szrj	std::swap(_M_pm, __u._M_pm);
641*38fd1498Szrj	std::swap(_M_owns, __u._M_owns);
642*38fd1498Szrj      }
643*38fd1498Szrj
644*38fd1498Szrj      mutex_type*
645*38fd1498Szrj      release() noexcept
646*38fd1498Szrj      {
647*38fd1498Szrj	_M_owns = false;
648*38fd1498Szrj	return std::exchange(_M_pm, nullptr);
649*38fd1498Szrj      }
650*38fd1498Szrj
651*38fd1498Szrj      // Getters
652*38fd1498Szrj
653*38fd1498Szrj      bool owns_lock() const noexcept { return _M_owns; }
654*38fd1498Szrj
655*38fd1498Szrj      explicit operator bool() const noexcept { return _M_owns; }
656*38fd1498Szrj
657*38fd1498Szrj      mutex_type* mutex() const noexcept { return _M_pm; }
658*38fd1498Szrj
659*38fd1498Szrj    private:
660*38fd1498Szrj      void
661*38fd1498Szrj      _M_lockable() const
662*38fd1498Szrj      {
663*38fd1498Szrj	if (_M_pm == nullptr)
664*38fd1498Szrj	  __throw_system_error(int(errc::operation_not_permitted));
665*38fd1498Szrj	if (_M_owns)
666*38fd1498Szrj	  __throw_system_error(int(errc::resource_deadlock_would_occur));
667*38fd1498Szrj      }
668*38fd1498Szrj
669*38fd1498Szrj      mutex_type*	_M_pm;
670*38fd1498Szrj      bool		_M_owns;
671*38fd1498Szrj    };
672*38fd1498Szrj
  /// Swap specialization for shared_lock
  /// @relates shared_lock
  /// Delegates to the member swap; exchanges mutexes and ownership flags.
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
678*38fd1498Szrj
679*38fd1498Szrj#endif // _GLIBCXX_USE_C99_STDINT_TR1
680*38fd1498Szrj
681*38fd1498Szrj  // @} group mutexes
682*38fd1498Szrj_GLIBCXX_END_NAMESPACE_VERSION
683*38fd1498Szrj} // namespace
684*38fd1498Szrj
685*38fd1498Szrj#endif // C++14
686*38fd1498Szrj
687*38fd1498Szrj#endif // _GLIBCXX_SHARED_MUTEX
688