xref: /netbsd-src/external/gpl3/gcc/dist/libstdc++-v3/include/std/mutex (revision 4fe0f936ff464bca8e6277bde90f477ef5a4d004)
14fee23f9Smrg// <mutex> -*- C++ -*-
24fee23f9Smrg
3e9e6e0f6Smrg// Copyright (C) 2003-2022 Free Software Foundation, Inc.
44fee23f9Smrg//
54fee23f9Smrg// This file is part of the GNU ISO C++ Library.  This library is free
64fee23f9Smrg// software; you can redistribute it and/or modify it under the
74fee23f9Smrg// terms of the GNU General Public License as published by the
84fee23f9Smrg// Free Software Foundation; either version 3, or (at your option)
94fee23f9Smrg// any later version.
104fee23f9Smrg
114fee23f9Smrg// This library is distributed in the hope that it will be useful,
124fee23f9Smrg// but WITHOUT ANY WARRANTY; without even the implied warranty of
134fee23f9Smrg// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
144fee23f9Smrg// GNU General Public License for more details.
154fee23f9Smrg
164fee23f9Smrg// Under Section 7 of GPL version 3, you are granted additional
174fee23f9Smrg// permissions described in the GCC Runtime Library Exception, version
184fee23f9Smrg// 3.1, as published by the Free Software Foundation.
194fee23f9Smrg
204fee23f9Smrg// You should have received a copy of the GNU General Public License and
214fee23f9Smrg// a copy of the GCC Runtime Library Exception along with this program;
224fee23f9Smrg// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
234fee23f9Smrg// <http://www.gnu.org/licenses/>.
244fee23f9Smrg
2548fb7bfaSmrg/** @file include/mutex
264fee23f9Smrg *  This is a Standard C++ Library header.
274fee23f9Smrg */
284fee23f9Smrg
294fee23f9Smrg#ifndef _GLIBCXX_MUTEX
304fee23f9Smrg#define _GLIBCXX_MUTEX 1
314fee23f9Smrg
324fee23f9Smrg#pragma GCC system_header
334fee23f9Smrg
3448fb7bfaSmrg#if __cplusplus < 201103L
354fee23f9Smrg# include <bits/c++0x_warning.h>
364fee23f9Smrg#else
374fee23f9Smrg
384fee23f9Smrg#include <tuple>
394fee23f9Smrg#include <exception>
404fee23f9Smrg#include <type_traits>
414fee23f9Smrg#include <system_error>
42e9e6e0f6Smrg#include <bits/chrono.h>
43cdbfa754Smrg#include <bits/std_mutex.h>
44654d12c0Smrg#include <bits/unique_lock.h>
45cdbfa754Smrg#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
46cdbfa754Smrg# include <condition_variable>
47cdbfa754Smrg# include <thread>
48cdbfa754Smrg#endif
49e9e6e0f6Smrg#include <ext/atomicity.h>     // __gnu_cxx::__is_single_threaded
50e9e6e0f6Smrg
51e9e6e0f6Smrg#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
52e9e6e0f6Smrg# include <bits/std_function.h>  // std::function
53a41324a9Smrg#endif
544fee23f9Smrg
5548fb7bfaSmrgnamespace std _GLIBCXX_VISIBILITY(default)
564fee23f9Smrg{
5748fb7bfaSmrg_GLIBCXX_BEGIN_NAMESPACE_VERSION
5848fb7bfaSmrg
59cdbfa754Smrg  /**
6043265c03Smrg   * @addtogroup mutexes
61cdbfa754Smrg   * @{
62cdbfa754Smrg   */
63cdbfa754Smrg
6448fb7bfaSmrg#ifdef _GLIBCXX_HAS_GTHREADS
65*4fe0f936Smrg  /// @cond undocumented
6648fb7bfaSmrg
674d5abbe8Smrg  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
6848fb7bfaSmrg  class __recursive_mutex_base
6948fb7bfaSmrg  {
7048fb7bfaSmrg  protected:
7148fb7bfaSmrg    typedef __gthread_recursive_mutex_t		__native_type;
7248fb7bfaSmrg
7348fb7bfaSmrg    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
7448fb7bfaSmrg    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
7548fb7bfaSmrg
7648fb7bfaSmrg#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // The gthreads target provides a static initializer, so the mutex is
    // initialized by the member initializer and needs no explicit destroy.
7748fb7bfaSmrg    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;
7848fb7bfaSmrg
7948fb7bfaSmrg    __recursive_mutex_base() = default;
8048fb7bfaSmrg#else
    // No static initializer: initialize and destroy the mutex explicitly.
8148fb7bfaSmrg    __native_type  _M_mutex;
8248fb7bfaSmrg
8348fb7bfaSmrg    __recursive_mutex_base()
8448fb7bfaSmrg    {
8548fb7bfaSmrg      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
8648fb7bfaSmrg      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
8748fb7bfaSmrg    }
8848fb7bfaSmrg
8948fb7bfaSmrg    ~__recursive_mutex_base()
9048fb7bfaSmrg    { __gthread_recursive_mutex_destroy(&_M_mutex); }
9148fb7bfaSmrg#endif
9248fb7bfaSmrg  };
93*4fe0f936Smrg  /// @endcond
9448fb7bfaSmrg
95*4fe0f936Smrg  /** The standard recursive mutex type.
96*4fe0f936Smrg   *
97*4fe0f936Smrg   * A recursive mutex can be locked more than once by the same thread.
98*4fe0f936Smrg   * Other threads cannot lock the mutex until the owning thread unlocks it
99*4fe0f936Smrg   * as many times as it was locked.
100*4fe0f936Smrg   *
101*4fe0f936Smrg   * @headerfile mutex
102*4fe0f936Smrg   * @since C++11
103*4fe0f936Smrg   */
10448fb7bfaSmrg  class recursive_mutex : private __recursive_mutex_base
1054fee23f9Smrg  {
1064fee23f9Smrg  public:
1074fee23f9Smrg    typedef __native_type* 			native_handle_type;
1084fee23f9Smrg
10948fb7bfaSmrg    recursive_mutex() = default;
11048fb7bfaSmrg    ~recursive_mutex() = default;
1114fee23f9Smrg
1124fee23f9Smrg    recursive_mutex(const recursive_mutex&) = delete;
1134fee23f9Smrg    recursive_mutex& operator=(const recursive_mutex&) = delete;
1144fee23f9Smrg
1154fee23f9Smrg    void
1164fee23f9Smrg    lock()
1174fee23f9Smrg    {
1184fee23f9Smrg      int __e = __gthread_recursive_mutex_lock(&_M_mutex);
1194fee23f9Smrg
1204fee23f9Smrg      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
1214fee23f9Smrg      if (__e)
1224fee23f9Smrg	__throw_system_error(__e);
1234fee23f9Smrg    }
1244fee23f9Smrg
1254fee23f9Smrg    bool
12648fb7bfaSmrg    try_lock() noexcept
1274fee23f9Smrg    {
1284fee23f9Smrg      // XXX EINVAL, EAGAIN, EBUSY
1294fee23f9Smrg      return !__gthread_recursive_mutex_trylock(&_M_mutex);
1304fee23f9Smrg    }
1314fee23f9Smrg
1324fee23f9Smrg    void
1334fee23f9Smrg    unlock()
1344fee23f9Smrg    {
      // Errors from the native unlock are ignored.
1354fee23f9Smrg      // XXX EINVAL, EAGAIN, EBUSY
1364fee23f9Smrg      __gthread_recursive_mutex_unlock(&_M_mutex);
1374fee23f9Smrg    }
1384fee23f9Smrg
1394fee23f9Smrg    native_handle_type
140a41324a9Smrg    native_handle() noexcept
1414fee23f9Smrg    { return &_M_mutex; }
1424fee23f9Smrg  };
1434fee23f9Smrg
14448fb7bfaSmrg#if _GTHREAD_USE_MUTEX_TIMEDLOCK
145*4fe0f936Smrg  /// @cond undocumented
146*4fe0f936Smrg
1474d5abbe8Smrg  template<typename _Derived>
1484d5abbe8Smrg    class __timed_mutex_impl
1494fee23f9Smrg    {
1504d5abbe8Smrg    protected:
      // CRTP base: implements _M_try_lock_for/_M_try_lock_until in terms
      // of the derived mutex's _M_timedlock (and, where
      // _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK is defined, _M_clocklock).
1514d5abbe8Smrg      template<typename _Rep, typename _Period>
1524d5abbe8Smrg	bool
1534d5abbe8Smrg	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
1544d5abbe8Smrg	{
15543265c03Smrg#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
15643265c03Smrg	  using __clock = chrono::steady_clock;
15743265c03Smrg#else
15843265c03Smrg	  using __clock = chrono::system_clock;
15943265c03Smrg#endif
16043265c03Smrg
16143265c03Smrg	  auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
	  // duration_cast truncates: if the clock's period is coarser than
	  // _Period the cast may have dropped a fraction of a tick, so round
	  // up to avoid timing out before the full wait has elapsed.
16243265c03Smrg	  if (ratio_greater<__clock::period, _Period>())
1634d5abbe8Smrg	    ++__rt;
16443265c03Smrg	  return _M_try_lock_until(__clock::now() + __rt);
1654d5abbe8Smrg	}
1664d5abbe8Smrg
1674d5abbe8Smrg      template<typename _Duration>
1684d5abbe8Smrg	bool
16943265c03Smrg	_M_try_lock_until(const chrono::time_point<chrono::system_clock,
1704d5abbe8Smrg						   _Duration>& __atime)
1714d5abbe8Smrg	{
	  // Split the absolute time into seconds and nanoseconds for the
	  // __gthread_time_t passed to the native timed-lock function.
1724d5abbe8Smrg	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
1734d5abbe8Smrg	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
1744d5abbe8Smrg
1754d5abbe8Smrg	  __gthread_time_t __ts = {
1764d5abbe8Smrg	    static_cast<std::time_t>(__s.time_since_epoch().count()),
1774d5abbe8Smrg	    static_cast<long>(__ns.count())
1784d5abbe8Smrg	  };
1794d5abbe8Smrg
180cdbfa754Smrg	  return static_cast<_Derived*>(this)->_M_timedlock(__ts);
1814d5abbe8Smrg	}
1824d5abbe8Smrg
18343265c03Smrg#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
18443265c03Smrg      template<typename _Duration>
18543265c03Smrg	bool
18643265c03Smrg	_M_try_lock_until(const chrono::time_point<chrono::steady_clock,
18743265c03Smrg						   _Duration>& __atime)
18843265c03Smrg	{
18943265c03Smrg	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
19043265c03Smrg	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
19143265c03Smrg
19243265c03Smrg	  __gthread_time_t __ts = {
19343265c03Smrg	    static_cast<std::time_t>(__s.time_since_epoch().count()),
19443265c03Smrg	    static_cast<long>(__ns.count())
19543265c03Smrg	  };
19643265c03Smrg
	  // steady_clock waits use CLOCK_MONOTONIC via the derived class's
	  // _M_clocklock hook.
19743265c03Smrg	  return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
19843265c03Smrg							    __ts);
19943265c03Smrg	}
20043265c03Smrg#endif
20143265c03Smrg
2024d5abbe8Smrg      template<typename _Clock, typename _Duration>
2034d5abbe8Smrg	bool
2044d5abbe8Smrg	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
2054d5abbe8Smrg	{
20643265c03Smrg#if __cplusplus > 201703L
20743265c03Smrg	  static_assert(chrono::is_clock_v<_Clock>);
20843265c03Smrg#endif
20943265c03Smrg	  // The user-supplied clock may not tick at the same rate as
21043265c03Smrg	  // steady_clock, so we must loop in order to guarantee that
21143265c03Smrg	  // the timeout has expired before returning false.
21243265c03Smrg	  auto __now = _Clock::now();
21343265c03Smrg	  do {
21443265c03Smrg	    auto __rtime = __atime - __now;
21543265c03Smrg	    if (_M_try_lock_for(__rtime))
21643265c03Smrg	      return true;
21743265c03Smrg	    __now = _Clock::now();
21843265c03Smrg	  } while (__atime > __now);
21943265c03Smrg	  return false;
2204d5abbe8Smrg	}
2214d5abbe8Smrg    };
222*4fe0f936Smrg  /// @endcond
2234d5abbe8Smrg
224*4fe0f936Smrg  /** The standard timed mutex type.
225*4fe0f936Smrg   *
226*4fe0f936Smrg   * A non-recursive mutex that supports a timeout when trying to acquire the
227*4fe0f936Smrg   * lock.
228*4fe0f936Smrg   *
229*4fe0f936Smrg   * @headerfile mutex
230*4fe0f936Smrg   * @since C++11
231*4fe0f936Smrg   */
2324d5abbe8Smrg  class timed_mutex
2334d5abbe8Smrg  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
2344d5abbe8Smrg  {
2354fee23f9Smrg  public:
2364fee23f9Smrg    typedef __native_type* 		  	native_handle_type;
2374fee23f9Smrg
23848fb7bfaSmrg    timed_mutex() = default;
23948fb7bfaSmrg    ~timed_mutex() = default;
2404fee23f9Smrg
2414fee23f9Smrg    timed_mutex(const timed_mutex&) = delete;
2424fee23f9Smrg    timed_mutex& operator=(const timed_mutex&) = delete;
2434fee23f9Smrg
2444fee23f9Smrg    void
2454fee23f9Smrg    lock()
2464fee23f9Smrg    {
2474fee23f9Smrg      int __e = __gthread_mutex_lock(&_M_mutex);
2484fee23f9Smrg
2494fee23f9Smrg      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
2504fee23f9Smrg      if (__e)
2514fee23f9Smrg	__throw_system_error(__e);
2524fee23f9Smrg    }
2534fee23f9Smrg
2544fee23f9Smrg    bool
25548fb7bfaSmrg    try_lock() noexcept
2564fee23f9Smrg    {
2574fee23f9Smrg      // XXX EINVAL, EAGAIN, EBUSY
2584fee23f9Smrg      return !__gthread_mutex_trylock(&_M_mutex);
2594fee23f9Smrg    }
2604fee23f9Smrg
2614fee23f9Smrg    template <class _Rep, class _Period>
2624fee23f9Smrg      bool
2634fee23f9Smrg      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
26448fb7bfaSmrg      { return _M_try_lock_for(__rtime); }
2654fee23f9Smrg
2664fee23f9Smrg    template <class _Clock, class _Duration>
2674fee23f9Smrg      bool
2684fee23f9Smrg      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
26948fb7bfaSmrg      { return _M_try_lock_until(__atime); }
2704fee23f9Smrg
2714fee23f9Smrg    void
2724fee23f9Smrg    unlock()
2734fee23f9Smrg    {
2744fee23f9Smrg      // XXX EINVAL, EAGAIN, EBUSY
2754fee23f9Smrg      __gthread_mutex_unlock(&_M_mutex);
2764fee23f9Smrg    }
2774fee23f9Smrg
2784fee23f9Smrg    native_handle_type
279a41324a9Smrg    native_handle() noexcept
2804fee23f9Smrg    { return &_M_mutex; }
281cdbfa754Smrg
282cdbfa754Smrg    private:
283cdbfa754Smrg      friend class __timed_mutex_impl<timed_mutex>;
284cdbfa754Smrg
      // Hook called by __timed_mutex_impl to perform the native timed wait.
285cdbfa754Smrg      bool
286cdbfa754Smrg      _M_timedlock(const __gthread_time_t& __ts)
287cdbfa754Smrg      { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }
28843265c03Smrg
28943265c03Smrg#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Hook for waits against an explicit clock (e.g. CLOCK_MONOTONIC).
29043265c03Smrg      bool
291*4fe0f936Smrg      _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
292*4fe0f936Smrg      { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
29343265c03Smrg#endif
2944fee23f9Smrg  };
2954fee23f9Smrg
296*4fe0f936Smrg  /** The standard recursive timed mutex type.
297*4fe0f936Smrg   *
298*4fe0f936Smrg   * A recursive mutex that supports a timeout when trying to acquire the
299*4fe0f936Smrg   * lock. A recursive mutex can be locked more than once by the same thread.
300*4fe0f936Smrg   * Other threads cannot lock the mutex until the owning thread unlocks it
301*4fe0f936Smrg   * as many times as it was locked.
302*4fe0f936Smrg   *
303*4fe0f936Smrg   * @headerfile mutex
304*4fe0f936Smrg   * @since C++11
305*4fe0f936Smrg   */
3064d5abbe8Smrg  class recursive_timed_mutex
3074d5abbe8Smrg  : private __recursive_mutex_base,
3084d5abbe8Smrg    public __timed_mutex_impl<recursive_timed_mutex>
3094fee23f9Smrg  {
3104fee23f9Smrg  public:
3114fee23f9Smrg    typedef __native_type* 			native_handle_type;
3124fee23f9Smrg
31348fb7bfaSmrg    recursive_timed_mutex() = default;
31448fb7bfaSmrg    ~recursive_timed_mutex() = default;
3154fee23f9Smrg
3164fee23f9Smrg    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
3174fee23f9Smrg    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
3184fee23f9Smrg
3194fee23f9Smrg    void
3204fee23f9Smrg    lock()
3214fee23f9Smrg    {
3224fee23f9Smrg      int __e = __gthread_recursive_mutex_lock(&_M_mutex);
3234fee23f9Smrg
3244fee23f9Smrg      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
3254fee23f9Smrg      if (__e)
3264fee23f9Smrg	__throw_system_error(__e);
3274fee23f9Smrg    }
3284fee23f9Smrg
3294fee23f9Smrg    bool
33048fb7bfaSmrg    try_lock() noexcept
3314fee23f9Smrg    {
3324fee23f9Smrg      // XXX EINVAL, EAGAIN, EBUSY
3334fee23f9Smrg      return !__gthread_recursive_mutex_trylock(&_M_mutex);
3344fee23f9Smrg    }
3354fee23f9Smrg
3364fee23f9Smrg    template <class _Rep, class _Period>
3374fee23f9Smrg      bool
3384fee23f9Smrg      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
33948fb7bfaSmrg      { return _M_try_lock_for(__rtime); }
3404fee23f9Smrg
3414fee23f9Smrg    template <class _Clock, class _Duration>
3424fee23f9Smrg      bool
3434fee23f9Smrg      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
34448fb7bfaSmrg      { return _M_try_lock_until(__atime); }
3454fee23f9Smrg
3464fee23f9Smrg    void
3474fee23f9Smrg    unlock()
3484fee23f9Smrg    {
3494fee23f9Smrg      // XXX EINVAL, EAGAIN, EBUSY
3504fee23f9Smrg      __gthread_recursive_mutex_unlock(&_M_mutex);
3514fee23f9Smrg    }
3524fee23f9Smrg
3534fee23f9Smrg    native_handle_type
354a41324a9Smrg    native_handle() noexcept
3554fee23f9Smrg    { return &_M_mutex; }
3564fee23f9Smrg
3574fee23f9Smrg    private:
358cdbfa754Smrg      friend class __timed_mutex_impl<recursive_timed_mutex>;
359cdbfa754Smrg
      // Hook called by __timed_mutex_impl to perform the native timed wait.
360cdbfa754Smrg      bool
361cdbfa754Smrg      _M_timedlock(const __gthread_time_t& __ts)
362cdbfa754Smrg      { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
36343265c03Smrg
36443265c03Smrg#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Hook for waits against an explicit clock (e.g. CLOCK_MONOTONIC).
36543265c03Smrg      bool
366*4fe0f936Smrg      _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
367*4fe0f936Smrg      { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
36843265c03Smrg#endif
3694fee23f9Smrg  };
3704fee23f9Smrg
371cdbfa754Smrg#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
372cdbfa754Smrg
373cdbfa754Smrg  /// timed_mutex
  // Fallback implementation used when the gthreads target does not provide
  // timed mutex operations (! _GTHREAD_USE_MUTEX_TIMEDLOCK): the timed lock
  // is emulated with a plain mutex, a condition_variable and a bool flag.
374cdbfa754Smrg  class timed_mutex
3754fee23f9Smrg  {
376cdbfa754Smrg    mutex		_M_mut;
377cdbfa754Smrg    condition_variable	_M_cv;
378cdbfa754Smrg    bool		_M_locked = false;
379cdbfa754Smrg
3804fee23f9Smrg  public:
3814fee23f9Smrg
382cdbfa754Smrg    timed_mutex() = default;
    // Destroying a locked timed_mutex is a precondition violation.
383cdbfa754Smrg    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
3844fee23f9Smrg
385cdbfa754Smrg    timed_mutex(const timed_mutex&) = delete;
386cdbfa754Smrg    timed_mutex& operator=(const timed_mutex&) = delete;
3874fee23f9Smrg
3884fee23f9Smrg    void
3894fee23f9Smrg    lock()
3904fee23f9Smrg    {
      // Block until the flag is clear, then claim it.
391cdbfa754Smrg      unique_lock<mutex> __lk(_M_mut);
392cdbfa754Smrg      _M_cv.wait(__lk, [&]{ return !_M_locked; });
393cdbfa754Smrg      _M_locked = true;
3944fee23f9Smrg    }
3954fee23f9Smrg
3964fee23f9Smrg    bool
3974fee23f9Smrg    try_lock()
3984fee23f9Smrg    {
399cdbfa754Smrg      lock_guard<mutex> __lk(_M_mut);
400cdbfa754Smrg      if (_M_locked)
401cdbfa754Smrg	return false;
402cdbfa754Smrg      _M_locked = true;
403cdbfa754Smrg      return true;
4044fee23f9Smrg    }
4054fee23f9Smrg
4064fee23f9Smrg    template<typename _Rep, typename _Period>
4074fee23f9Smrg      bool
4084fee23f9Smrg      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
4094fee23f9Smrg      {
410cdbfa754Smrg	unique_lock<mutex> __lk(_M_mut);
	// wait_for returns false if the timeout expired with the flag set.
411cdbfa754Smrg	if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
412cdbfa754Smrg	  return false;
413cdbfa754Smrg	_M_locked = true;
414cdbfa754Smrg	return true;
4154fee23f9Smrg      }
416cdbfa754Smrg
417cdbfa754Smrg    template<typename _Clock, typename _Duration>
418cdbfa754Smrg      bool
419cdbfa754Smrg      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
420cdbfa754Smrg      {
421cdbfa754Smrg	unique_lock<mutex> __lk(_M_mut);
422cdbfa754Smrg	if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
423cdbfa754Smrg	  return false;
424cdbfa754Smrg	_M_locked = true;
425cdbfa754Smrg	return true;
4264fee23f9Smrg      }
4274fee23f9Smrg
4284fee23f9Smrg    void
4294fee23f9Smrg    unlock()
4304fee23f9Smrg    {
431cdbfa754Smrg      lock_guard<mutex> __lk(_M_mut);
      // Unlocking a mutex the caller does not hold is a precondition
      // violation.
432cdbfa754Smrg      __glibcxx_assert( _M_locked );
433cdbfa754Smrg      _M_locked = false;
434cdbfa754Smrg      _M_cv.notify_one();
435cdbfa754Smrg    }
436cdbfa754Smrg  };
437cdbfa754Smrg
438cdbfa754Smrg  /// recursive_timed_mutex
  // Fallback implementation used when the gthreads target does not provide
  // timed mutex operations: emulated with a mutex, a condition_variable,
  // the owning thread's id and a lock count.
439cdbfa754Smrg  class recursive_timed_mutex
4404fee23f9Smrg  {
441cdbfa754Smrg    mutex		_M_mut;
442cdbfa754Smrg    condition_variable	_M_cv;
443cdbfa754Smrg    thread::id		_M_owner;
444cdbfa754Smrg    unsigned		_M_count = 0;
445cdbfa754Smrg
446cdbfa754Smrg    // Predicate type that tests whether the current thread can lock a mutex.
447cdbfa754Smrg    struct _Can_lock
448cdbfa754Smrg    {
449cdbfa754Smrg      // Returns true if the mutex is unlocked or is locked by _M_caller.
450cdbfa754Smrg      bool
451cdbfa754Smrg      operator()() const noexcept
452cdbfa754Smrg      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }
453cdbfa754Smrg
454cdbfa754Smrg      const recursive_timed_mutex* _M_mx;
455cdbfa754Smrg      thread::id _M_caller;
456cdbfa754Smrg    };
457cdbfa754Smrg
458cdbfa754Smrg  public:
459cdbfa754Smrg
460cdbfa754Smrg    recursive_timed_mutex() = default;
    // Destroying a locked mutex is a precondition violation.
461cdbfa754Smrg    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }
462cdbfa754Smrg
463cdbfa754Smrg    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
464cdbfa754Smrg    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
4654fee23f9Smrg
4664fee23f9Smrg    void
467cdbfa754Smrg    lock()
4684fee23f9Smrg    {
469cdbfa754Smrg      auto __id = this_thread::get_id();
470cdbfa754Smrg      _Can_lock __can_lock{this, __id};
471cdbfa754Smrg      unique_lock<mutex> __lk(_M_mut);
472cdbfa754Smrg      _M_cv.wait(__lk, __can_lock);
      // _M_count is unsigned, so -1u is its maximum value: the ownership
      // level cannot be increased any further.
473cdbfa754Smrg      if (_M_count == -1u)
474cdbfa754Smrg	__throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
475cdbfa754Smrg      _M_owner = __id;
476cdbfa754Smrg      ++_M_count;
4774fee23f9Smrg    }
4784fee23f9Smrg
4794fee23f9Smrg    bool
480cdbfa754Smrg    try_lock()
481cdbfa754Smrg    {
482cdbfa754Smrg      auto __id = this_thread::get_id();
483cdbfa754Smrg      _Can_lock __can_lock{this, __id};
484cdbfa754Smrg      lock_guard<mutex> __lk(_M_mut);
485cdbfa754Smrg      if (!__can_lock())
486cdbfa754Smrg	return false;
      // Maximum ownership level reached: cannot lock again.
487cdbfa754Smrg      if (_M_count == -1u)
488cdbfa754Smrg	return false;
489cdbfa754Smrg      _M_owner = __id;
490cdbfa754Smrg      ++_M_count;
491cdbfa754Smrg      return true;
492cdbfa754Smrg    }
4934fee23f9Smrg
494cdbfa754Smrg    template<typename _Rep, typename _Period>
495cdbfa754Smrg      bool
496cdbfa754Smrg      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
497cdbfa754Smrg      {
498cdbfa754Smrg	auto __id = this_thread::get_id();
499cdbfa754Smrg	_Can_lock __can_lock{this, __id};
500cdbfa754Smrg	unique_lock<mutex> __lk(_M_mut);
501cdbfa754Smrg	if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
502cdbfa754Smrg	  return false;
503cdbfa754Smrg	if (_M_count == -1u)
504cdbfa754Smrg	  return false;
505cdbfa754Smrg	_M_owner = __id;
506cdbfa754Smrg	++_M_count;
507cdbfa754Smrg	return true;
508cdbfa754Smrg      }
5094fee23f9Smrg
510cdbfa754Smrg    template<typename _Clock, typename _Duration>
511cdbfa754Smrg      bool
512cdbfa754Smrg      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
513cdbfa754Smrg      {
514cdbfa754Smrg	auto __id = this_thread::get_id();
515cdbfa754Smrg	_Can_lock __can_lock{this, __id};
516cdbfa754Smrg	unique_lock<mutex> __lk(_M_mut);
517cdbfa754Smrg	if (!_M_cv.wait_until(__lk, __atime, __can_lock))
518cdbfa754Smrg	  return false;
519cdbfa754Smrg	if (_M_count == -1u)
520cdbfa754Smrg	  return false;
521cdbfa754Smrg	_M_owner = __id;
522cdbfa754Smrg	++_M_count;
523cdbfa754Smrg	return true;
524cdbfa754Smrg      }
5254fee23f9Smrg
526cdbfa754Smrg    void
527cdbfa754Smrg    unlock()
528cdbfa754Smrg    {
529cdbfa754Smrg      lock_guard<mutex> __lk(_M_mut);
530cdbfa754Smrg      __glibcxx_assert( _M_owner == this_thread::get_id() );
531cdbfa754Smrg      __glibcxx_assert( _M_count > 0 );
      // Only release ownership (and wake a waiter) when the outermost
      // lock level is released.
532cdbfa754Smrg      if (--_M_count == 0)
533cdbfa754Smrg	{
534cdbfa754Smrg	  _M_owner = {};
535cdbfa754Smrg	  _M_cv.notify_one();
536cdbfa754Smrg	}
537cdbfa754Smrg    }
5384fee23f9Smrg  };
5394fee23f9Smrg
540cdbfa754Smrg#endif
541cdbfa754Smrg#endif // _GLIBCXX_HAS_GTHREADS
5424fee23f9Smrg
54343265c03Smrg  /// @cond undocumented
544e9e6e0f6Smrg  namespace __detail
545e9e6e0f6Smrg  {
546e9e6e0f6Smrg    // Lock the last lockable, after all previous ones are locked.
    // Returns -1 on success, 0 if this lockable could not be locked.
547e9e6e0f6Smrg    template<typename _Lockable>
548e9e6e0f6Smrg      inline int
549e9e6e0f6Smrg      __try_lock_impl(_Lockable& __l)
550e9e6e0f6Smrg      {
551e9e6e0f6Smrg	if (unique_lock<_Lockable> __lock{__l, try_to_lock})
552e9e6e0f6Smrg	  {
553e9e6e0f6Smrg	    __lock.release();
554e9e6e0f6Smrg	    return -1;
555e9e6e0f6Smrg	  }
556e9e6e0f6Smrg	else
557e9e6e0f6Smrg	  return 0;
558e9e6e0f6Smrg      }
55948fb7bfaSmrg
560e9e6e0f6Smrg    // Lock each lockable in turn.
561e9e6e0f6Smrg    // Use iteration if all lockables are the same type, recursion otherwise.
    // Returns -1 if every lock was acquired, otherwise the 0-based index of
    // the lockable that could not be locked; any locks already acquired are
    // released before returning.
562e9e6e0f6Smrg    template<typename _L0, typename... _Lockables>
563e9e6e0f6Smrg      inline int
564e9e6e0f6Smrg      __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
5654fee23f9Smrg      {
566e9e6e0f6Smrg#if __cplusplus >= 201703L
567e9e6e0f6Smrg	if constexpr ((is_same_v<_L0, _Lockables> && ...))
5684fee23f9Smrg	  {
569e9e6e0f6Smrg	    constexpr int _Np = 1 + sizeof...(_Lockables);
570e9e6e0f6Smrg	    unique_lock<_L0> __locks[_Np] = {
571e9e6e0f6Smrg		{__l0, defer_lock}, {__lockables, defer_lock}...
572e9e6e0f6Smrg	    };
573e9e6e0f6Smrg	    for (int __i = 0; __i < _Np; ++__i)
5744fee23f9Smrg	      {
575e9e6e0f6Smrg		if (!__locks[__i].try_lock())
576e9e6e0f6Smrg		  {
			// Roll back everything locked so far.
577e9e6e0f6Smrg		    const int __failed = __i;
578e9e6e0f6Smrg		    while (__i--)
579e9e6e0f6Smrg		      __locks[__i].unlock();
580e9e6e0f6Smrg		    return __failed;
581e9e6e0f6Smrg		  }
582e9e6e0f6Smrg	      }
	    // Success: transfer ownership of all locks to the caller.
583e9e6e0f6Smrg	    for (auto& __l : __locks)
584e9e6e0f6Smrg	      __l.release();
585e9e6e0f6Smrg	    return -1;
586e9e6e0f6Smrg	  }
587e9e6e0f6Smrg	else
588e9e6e0f6Smrg#endif
589e9e6e0f6Smrg	if (unique_lock<_L0> __lock{__l0, try_to_lock})
590e9e6e0f6Smrg	  {
591e9e6e0f6Smrg	    int __idx = __detail::__try_lock_impl(__lockables...);
59248fb7bfaSmrg	    if (__idx == -1)
593e9e6e0f6Smrg	      {
59448fb7bfaSmrg		__lock.release();
595e9e6e0f6Smrg		return -1;
5964fee23f9Smrg	      }
	    // A later lockable failed: report its index relative to __l0.
597e9e6e0f6Smrg	    return __idx + 1;
5984fee23f9Smrg	  }
599e9e6e0f6Smrg	else
600e9e6e0f6Smrg	  return 0;
601e9e6e0f6Smrg      }
6024fee23f9Smrg
603e9e6e0f6Smrg  } // namespace __detail
60443265c03Smrg  /// @endcond
6054fee23f9Smrg
6064fee23f9Smrg  /** @brief Generic try_lock.
6073f8cba22Smrg   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
6083f8cba22Smrg   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
6093f8cba22Smrg   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
6104fee23f9Smrg   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
6114fee23f9Smrg   *          a 0-based index corresponding to the argument that returned false.
6124fee23f9Smrg   *  @post Either all arguments are locked, or none will be.
6134fee23f9Smrg   *
6144fee23f9Smrg   *  Sequentially calls try_lock() on each argument.
6154fee23f9Smrg   */
616e9e6e0f6Smrg  template<typename _L1, typename _L2, typename... _L3>
617e9e6e0f6Smrg    inline int
618e9e6e0f6Smrg    try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
6194fee23f9Smrg    {
      // All the work is done by __detail::__try_lock_impl, which locks the
      // arguments in order and unwinds already-acquired locks on failure.
620e9e6e0f6Smrg      return __detail::__try_lock_impl(__l1, __l2, __l3...);
6214fee23f9Smrg    }
6224fee23f9Smrg
623e9e6e0f6Smrg  /// @cond undocumented
624e9e6e0f6Smrg  namespace __detail
625e9e6e0f6Smrg  {
626e9e6e0f6Smrg    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
627e9e6e0f6Smrg    // On each recursion the lockables are rotated left one position,
628e9e6e0f6Smrg    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
629e9e6e0f6Smrg    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
630e9e6e0f6Smrg    // so that l_i is the first argument, and then blocks until l_i is locked.
631e9e6e0f6Smrg    template<typename _L0, typename... _L1>
632e9e6e0f6Smrg      void
633e9e6e0f6Smrg      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
634e9e6e0f6Smrg      {
635e9e6e0f6Smrg	while (__i >= __depth)
636e9e6e0f6Smrg	  {
637e9e6e0f6Smrg	    if (__i == __depth)
638e9e6e0f6Smrg	      {
639e9e6e0f6Smrg		int __failed = 1; // index that couldn't be locked
640e9e6e0f6Smrg		{
		  // Block on __l0, then try the rest without blocking.
641e9e6e0f6Smrg		  unique_lock<_L0> __first(__l0);
		  // __try_lock_impl returns -1 when all of __l1... were
		  // locked, so __failed == 0 means everything is locked.
642e9e6e0f6Smrg		  __failed += __detail::__try_lock_impl(__l1...);
643e9e6e0f6Smrg		  if (!__failed)
644e9e6e0f6Smrg		    {
645e9e6e0f6Smrg		      __i = -1; // finished
646e9e6e0f6Smrg		      __first.release();
647e9e6e0f6Smrg		      return;
648e9e6e0f6Smrg		    }
649e9e6e0f6Smrg		}
		// __first went out of scope above, unlocking __l0; yield
		// before retrying so the holder of the failed lock can run.
650e9e6e0f6Smrg#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
651e9e6e0f6Smrg		__gthread_yield();
652e9e6e0f6Smrg#endif
653e9e6e0f6Smrg		constexpr auto __n = 1 + sizeof...(_L1);
654e9e6e0f6Smrg		__i = (__depth + __failed) % __n;
655e9e6e0f6Smrg	      }
656e9e6e0f6Smrg	    else // rotate left until l_i is first.
657e9e6e0f6Smrg	      __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
658e9e6e0f6Smrg	  }
659e9e6e0f6Smrg      }
660e9e6e0f6Smrg
661e9e6e0f6Smrg  } // namespace __detail
662e9e6e0f6Smrg  /// @endcond
663e9e6e0f6Smrg
66448fb7bfaSmrg  /** @brief Generic lock.
6653f8cba22Smrg   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
6663f8cba22Smrg   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
6673f8cba22Smrg   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
66848fb7bfaSmrg   *  @throw An exception thrown by an argument's lock() or try_lock() member.
66948fb7bfaSmrg   *  @post All arguments are locked.
67048fb7bfaSmrg   *
67148fb7bfaSmrg   *  All arguments are locked via a sequence of calls to lock(), try_lock()
672e9e6e0f6Smrg   *  and unlock().  If this function exits via an exception any locks that
673e9e6e0f6Smrg   *  were obtained will be released.
67448fb7bfaSmrg   */
6754fee23f9Smrg  template<typename _L1, typename _L2, typename... _L3>
6764fee23f9Smrg    void
67748fb7bfaSmrg    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
67848fb7bfaSmrg    {
679e9e6e0f6Smrg#if __cplusplus >= 201703L
680e9e6e0f6Smrg      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
68148fb7bfaSmrg	{
	  // Homogeneous lockables: iterative algorithm.  Block on one lock,
	  // try_lock the rest; on failure unlock everything and restart,
	  // blocking first on the lock that could not be acquired.
682e9e6e0f6Smrg	  constexpr int _Np = 2 + sizeof...(_L3);
683e9e6e0f6Smrg	  unique_lock<_L1> __locks[] = {
684e9e6e0f6Smrg	      {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
685e9e6e0f6Smrg	  };
686e9e6e0f6Smrg	  int __first = 0;
687e9e6e0f6Smrg	  do {
688e9e6e0f6Smrg	    __locks[__first].lock();
689e9e6e0f6Smrg	    for (int __j = 1; __j < _Np; ++__j)
69048fb7bfaSmrg	      {
691e9e6e0f6Smrg		const int __idx = (__first + __j) % _Np;
692e9e6e0f6Smrg		if (!__locks[__idx].try_lock())
693e9e6e0f6Smrg		  {
		    // Release the locks acquired this round, then retry
		    // starting from the one that failed.
694e9e6e0f6Smrg		    for (int __k = __j; __k != 0; --__k)
695e9e6e0f6Smrg		      __locks[(__first + __k - 1) % _Np].unlock();
696e9e6e0f6Smrg		    __first = __idx;
697e9e6e0f6Smrg		    break;
69848fb7bfaSmrg		  }
69948fb7bfaSmrg	      }
700e9e6e0f6Smrg	  } while (!__locks[__first].owns_lock());
701e9e6e0f6Smrg
	  // Success: transfer ownership of all locks to the caller.
702e9e6e0f6Smrg	  for (auto& __l : __locks)
703e9e6e0f6Smrg	    __l.release();
704e9e6e0f6Smrg	}
705e9e6e0f6Smrg      else
706e9e6e0f6Smrg#endif
707e9e6e0f6Smrg	{
	  // Heterogeneous lockables (or pre-C++17): recursive algorithm.
708e9e6e0f6Smrg	  int __i = 0;
709e9e6e0f6Smrg	  __detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
710e9e6e0f6Smrg	}
71148fb7bfaSmrg    }
7124fee23f9Smrg
713a41324a9Smrg#if __cplusplus >= 201703L
714e9e6e0f6Smrg#define __cpp_lib_scoped_lock 201703L
715a41324a9Smrg  /** @brief A scoped lock type for multiple lockable objects.
716a41324a9Smrg   *
717a41324a9Smrg   * A scoped_lock controls mutex ownership within a scope, releasing
718a41324a9Smrg   * ownership in the destructor.
719*4fe0f936Smrg   *
720*4fe0f936Smrg   * @headerfile mutex
721*4fe0f936Smrg   * @since C++17
722a41324a9Smrg   */
723a41324a9Smrg  template<typename... _MutexTypes>
724a41324a9Smrg    class scoped_lock
725a41324a9Smrg    {
726a41324a9Smrg    public:
      // Locks all mutexes using std::lock, which avoids deadlock.
727a41324a9Smrg      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
728a41324a9Smrg      { std::lock(__m...); }
729a41324a9Smrg
730a41324a9Smrg      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
731a41324a9Smrg      : _M_devices(std::tie(__m...))
732a41324a9Smrg      { } // calling thread owns mutex
733a41324a9Smrg
734a41324a9Smrg      ~scoped_lock()
      // Unlock every mutex (the fold expression visits them left-to-right).
73543265c03Smrg      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
736a41324a9Smrg
737a41324a9Smrg      scoped_lock(const scoped_lock&) = delete;
738a41324a9Smrg      scoped_lock& operator=(const scoped_lock&) = delete;
739a41324a9Smrg
740a41324a9Smrg    private:
      // Non-owning references to the managed mutexes.
741a41324a9Smrg      tuple<_MutexTypes&...> _M_devices;
742a41324a9Smrg    };
743a41324a9Smrg
  // Explicit specialization for zero mutexes: nothing to lock or unlock.
744a41324a9Smrg  template<>
745a41324a9Smrg    class scoped_lock<>
746a41324a9Smrg    {
747a41324a9Smrg    public:
748a41324a9Smrg      explicit scoped_lock() = default;
749a41324a9Smrg      explicit scoped_lock(adopt_lock_t) noexcept { }
750a41324a9Smrg      ~scoped_lock() = default;
751a41324a9Smrg
752a41324a9Smrg      scoped_lock(const scoped_lock&) = delete;
753a41324a9Smrg      scoped_lock& operator=(const scoped_lock&) = delete;
754a41324a9Smrg    };
755a41324a9Smrg
  // Partial specialization for a single mutex: stores a plain reference
  // instead of a tuple, and exposes the mutex_type typedef.
756a41324a9Smrg  template<typename _Mutex>
757a41324a9Smrg    class scoped_lock<_Mutex>
758a41324a9Smrg    {
759a41324a9Smrg    public:
760a41324a9Smrg      using mutex_type = _Mutex;
761a41324a9Smrg
762a41324a9Smrg      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
763a41324a9Smrg      { _M_device.lock(); }
764a41324a9Smrg
765a41324a9Smrg      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
766a41324a9Smrg      : _M_device(__m)
767a41324a9Smrg      { } // calling thread owns mutex
768a41324a9Smrg
769a41324a9Smrg      ~scoped_lock()
770a41324a9Smrg      { _M_device.unlock(); }
771a41324a9Smrg
772a41324a9Smrg      scoped_lock(const scoped_lock&) = delete;
773a41324a9Smrg      scoped_lock& operator=(const scoped_lock&) = delete;
774a41324a9Smrg
775a41324a9Smrg    private:
776a41324a9Smrg      mutex_type&  _M_device;
777a41324a9Smrg    };
778a41324a9Smrg#endif // C++17
779a41324a9Smrg
78048fb7bfaSmrg#ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
    /// Default constructor: the flag starts in the "no execution yet" state.
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    // RAII helper, defined below (TLS and non-TLS variants). It publishes
    // the type-erased callable that __once_proxy will invoke.
    struct _Prepare_execution;

    // std::call_once needs access to _M_once and _Prepare_execution.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
8024fee23f9Smrg
  /// @cond undocumented
# ifdef _GLIBCXX_HAVE_TLS
  // If TLS is available use thread-local state for the type-erased callable
  // that is being run by std::call_once in the current thread.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // RAII type to set up state for pthread_once call.
  // Publishes the callable via the thread-local pointers above, and clears
  // them again on destruction.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store address in thread-local pointer:
	__once_callable = std::__addressof(__c);
	// Trampoline function to invoke the closure via thread-local pointer:
	__once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      // Reset the thread-local state so stale pointers are never reused.
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };

# else
  // Without TLS use a global std::mutex and store the callable in a
  // global std::function.
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();

  // RAII type to set up state for pthread_once call.
  // The global mutex is locked for this object's whole lifetime, serializing
  // access to the global __once_functor.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store the callable in the global std::function
	__once_functor = __c;
	// NOTE(review): presumably this lets __once_proxy unlock the global
	// mutex before running the callable — confirm against __once_proxy.
	__set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      // If the lock is still owned here, __once_proxy never ran; detach the
      // pointer to it before the unique_lock releases the mutex.
      if (_M_functor_lock)
	__set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
# endif
  /// @endcond
8724fee23f9Smrg
  // This function is passed to pthread_once by std::call_once.
  // It runs __once_call() or __once_functor().
  extern "C" void __once_proxy(void);

  /// Invoke a callable and synchronize with other calls using the same flag
  ///
  /// @param __once  Flag shared by all calls that must be synchronized.
  /// @param __f     Callable to invoke (only by the active execution).
  /// @param __args  Arguments forwarded to @p __f.
  /// @throws std::system_error if the underlying __gthread_once call fails.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function.  Forwarding here (rather than
      // copying) implements LWG 2442: call_once must not DECAY_COPY.
      auto __callable = [&] {
	  std::__invoke(std::forward<_Callable>(__f),
			std::forward<_Args>(__args)...);
      };

      // Publish __callable so __once_proxy can find and run it.
      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
	__throw_system_error(__e);
    }
894e9e6e0f6Smrg
895e9e6e0f6Smrg#else // _GLIBCXX_HAS_GTHREADS
896e9e6e0f6Smrg
  /// Flag type used by std::call_once
  struct once_flag
  {
    /// Default constructor: the flag starts in the _Init state.
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    // Current state of the flag; holds one of the _Bits values.
    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      // Completes the execution; it counts as returning iff _M_returning
      // was set to true before destruction.
      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      // Set by call_once after the callable returns without throwing.
      bool _M_returning = false;
    };

    // std::call_once needs access to the private members above.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
948e9e6e0f6Smrg
949e9e6e0f6Smrg  // Inline definitions of std::once_flag members for single-threaded targets.
950e9e6e0f6Smrg
951e9e6e0f6Smrg  inline bool
952e9e6e0f6Smrg  once_flag::_M_passive() const noexcept
953e9e6e0f6Smrg  { return _M_once == _Bits::_Done; }
954e9e6e0f6Smrg
955e9e6e0f6Smrg  inline bool
956e9e6e0f6Smrg  once_flag::_M_activate()
957e9e6e0f6Smrg  {
958e9e6e0f6Smrg    if (_M_once == _Bits::_Init) [[__likely__]]
959e9e6e0f6Smrg      {
960e9e6e0f6Smrg	_M_once = _Bits::_Active;
961e9e6e0f6Smrg	return true;
962e9e6e0f6Smrg      }
963e9e6e0f6Smrg    else if (_M_passive()) // Caller should have checked this already.
964e9e6e0f6Smrg      return false;
965e9e6e0f6Smrg    else
966e9e6e0f6Smrg      __throw_system_error(EDEADLK);
967e9e6e0f6Smrg  }
968e9e6e0f6Smrg
969e9e6e0f6Smrg  inline void
970e9e6e0f6Smrg  once_flag::_M_finish(bool __returning) noexcept
971e9e6e0f6Smrg  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
972e9e6e0f6Smrg
  /// Invoke a callable and synchronize with other calls using the same flag
  ///
  /// Single-threaded implementation: no real synchronization is needed,
  /// only tracking of the flag's state across (possibly recursive) calls.
  ///
  /// @param __once  Flag shared by all calls that must be synchronized.
  /// @param __f     Callable to invoke (only by the active execution).
  /// @param __args  Arguments forwarded to @p __f.
  /// @throws std::system_error (EDEADLK) on recursive use of the same flag.
  template<typename _Callable, typename... _Args>
    inline void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Fast path: a returning execution already happened.
      if (__once._M_passive())
	return;
      else if (__once._M_activate())
	{
	  // Guarantees _M_finish is called even if __invoke throws.
	  once_flag::_Active_execution __exec(__once);

	  // _GLIBCXX_RESOLVE_LIB_DEFECTS
	  // 2442. call_once() shouldn't DECAY_COPY()
	  std::__invoke(std::forward<_Callable>(__f),
			std::forward<_Args>(__args)...);

	  // __f(__args...) did not throw
	  __exec._M_returning = true;
	}
    }
99348fb7bfaSmrg#endif // _GLIBCXX_HAS_GTHREADS
9944fee23f9Smrg
9959f30ce74Smrg  /// @} group mutexes
99648fb7bfaSmrg_GLIBCXX_END_NAMESPACE_VERSION
99748fb7bfaSmrg} // namespace
9984fee23f9Smrg
99948fb7bfaSmrg#endif // C++11
10004fee23f9Smrg
10014fee23f9Smrg#endif // _GLIBCXX_MUTEX
1002