xref: /netbsd-src/external/gpl3/gcc/dist/libstdc++-v3/include/std/mutex (revision 4fe0f936ff464bca8e6277bde90f477ef5a4d004)
1// <mutex> -*- C++ -*-
2
3// Copyright (C) 2003-2022 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library.  This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/mutex
26 *  This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_MUTEX
30#define _GLIBCXX_MUTEX 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <tuple>
39#include <exception>
40#include <type_traits>
41#include <system_error>
42#include <bits/chrono.h>
43#include <bits/std_mutex.h>
44#include <bits/unique_lock.h>
45#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
46# include <condition_variable>
47# include <thread>
48#endif
49#include <ext/atomicity.h>     // __gnu_cxx::__is_single_threaded
50
51#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
52# include <bits/std_function.h>  // std::function
53#endif
54
55namespace std _GLIBCXX_VISIBILITY(default)
56{
57_GLIBCXX_BEGIN_NAMESPACE_VERSION
58
59  /**
60   * @addtogroup mutexes
61   * @{
62   */
63
64#ifdef _GLIBCXX_HAS_GTHREADS
65  /// @cond undocumented
66
67  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // A static initializer exists, so no destructor is declared and no
    // run-time initialization is needed.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    // No static initializer: initialize and destroy at run time.
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
93  /// @endcond
94
95  /** The standard recursive mutex type.
96   *
97   * A recursive mutex can be locked more than once by the same thread.
98   * Other threads cannot lock the mutex until the owning thread unlocks it
99   * as many times as it was locked.
100   *
101   * @headerfile mutex
102   * @since C++11
103   */
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Acquire the mutex, blocking if necessary.
    // Throws system_error if the native lock operation fails.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // XXX EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Try to acquire the mutex without blocking.  Returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Release one level of ownership.  The return value of the native
    // unlock operation is deliberately ignored: unlocking a mutex this
    // thread does not own is undefined behaviour.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    // Access the underlying implementation-defined mutex object.
    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
143
144#if _GTHREAD_USE_MUTEX_TIMEDLOCK
145  /// @cond undocumented
146
  // CRTP base providing the try_lock_for/try_lock_until conversion logic
  // shared by timed_mutex and recursive_timed_mutex.  _Derived must provide
  // _M_timedlock(__gthread_time_t) and, when pthread_mutex_clocklock is
  // available, _M_clocklock(clockid_t, __gthread_time_t).
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      // Convert a relative timeout to an absolute one and lock.
      template<typename _Rep, typename _Period>
	bool
	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
	  // With clocklock available the steady clock can be used, which
	  // is immune to system clock adjustments.
	  using __clock = chrono::steady_clock;
#else
	  using __clock = chrono::system_clock;
#endif

	  auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
	  if (ratio_greater<__clock::period, _Period>())
	    ++__rt; // Round up so we never wake before the timeout expires.
	  return _M_try_lock_until(__clock::now() + __rt);
	}

      // Absolute timeout expressed against the system clock: hand it to
      // the derived class's native timed-lock operation.
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::system_clock,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  // Split into the {seconds, nanoseconds} form the C API expects.
	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_timedlock(__ts);
	}

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Absolute timeout expressed against the steady clock: use the
      // derived class's clock-lock operation with CLOCK_MONOTONIC.
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::steady_clock,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
							    __ts);
	}
#endif

      // Absolute timeout against an arbitrary user-supplied clock.
      template<typename _Clock, typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
#if __cplusplus > 201703L
	  static_assert(chrono::is_clock_v<_Clock>);
#endif
	  // The user-supplied clock may not tick at the same rate as
	  // steady_clock, so we must loop in order to guarantee that
	  // the timeout has expired before returning false.
	  auto __now = _Clock::now();
	  do {
	    auto __rtime = __atime - __now;
	    if (_M_try_lock_for(__rtime))
	      return true;
	    __now = _Clock::now();
	  } while (__atime > __now);
	  return false;
	}
    };
222  /// @endcond
223
224  /** The standard timed mutex type.
225   *
226   * A non-recursive mutex that supports a timeout when trying to acquire the
227   * lock.
228   *
229   * @headerfile mutex
230   * @since C++11
231   */
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Acquire the mutex, blocking if necessary.
    // Throws system_error if the native lock operation fails.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // XXX EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Try to acquire the mutex without blocking.  Returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Try to acquire the mutex, waiting at most __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    // Try to acquire the mutex, waiting until __atime at the latest.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    // Release the mutex.  The native unlock result is deliberately
    // ignored: unlocking a mutex this thread does not own is undefined.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Access the underlying implementation-defined mutex object.
    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

    private:
      friend class __timed_mutex_impl<timed_mutex>;

      // Callback for __timed_mutex_impl: absolute timeout for the
      // native timed-lock operation.
      bool
      _M_timedlock(const __gthread_time_t& __ts)
      { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Callback for __timed_mutex_impl: timeout measured against an
      // explicitly chosen clock.
      bool
      _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
      { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
#endif
  };
295
296  /** The standard recursive timed mutex type.
297   *
298   * A recursive mutex that supports a timeout when trying to acquire the
299   * lock. A recursive mutex can be locked more than once by the same thread.
300   * Other threads cannot lock the mutex until the owning thread unlocks it
301   * as many times as it was locked.
302   *
303   * @headerfile mutex
304   * @since C++11
305   */
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Acquire the mutex (recursively, if this thread already owns it),
    // blocking if necessary.  Throws system_error on native failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // XXX EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Try to acquire the mutex without blocking.  Returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Try to acquire the mutex, waiting at most __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    // Try to acquire the mutex, waiting until __atime at the latest.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    // Release one level of ownership.  The native unlock result is
    // deliberately ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    // Access the underlying implementation-defined mutex object.
    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

    private:
      friend class __timed_mutex_impl<recursive_timed_mutex>;

      // Callback for __timed_mutex_impl: absolute timeout for the
      // native timed-lock operation.
      bool
      _M_timedlock(const __gthread_time_t& __ts)
      { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Callback for __timed_mutex_impl: timeout measured against an
      // explicitly chosen clock.
      bool
      _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
      { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
#endif
  };
370
371#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
372
373  /// timed_mutex
374  class timed_mutex
375  {
376    mutex		_M_mut;
377    condition_variable	_M_cv;
378    bool		_M_locked = false;
379
380  public:
381
382    timed_mutex() = default;
383    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
384
385    timed_mutex(const timed_mutex&) = delete;
386    timed_mutex& operator=(const timed_mutex&) = delete;
387
388    void
389    lock()
390    {
391      unique_lock<mutex> __lk(_M_mut);
392      _M_cv.wait(__lk, [&]{ return !_M_locked; });
393      _M_locked = true;
394    }
395
396    bool
397    try_lock()
398    {
399      lock_guard<mutex> __lk(_M_mut);
400      if (_M_locked)
401	return false;
402      _M_locked = true;
403      return true;
404    }
405
406    template<typename _Rep, typename _Period>
407      bool
408      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
409      {
410	unique_lock<mutex> __lk(_M_mut);
411	if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
412	  return false;
413	_M_locked = true;
414	return true;
415      }
416
417    template<typename _Clock, typename _Duration>
418      bool
419      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
420      {
421	unique_lock<mutex> __lk(_M_mut);
422	if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
423	  return false;
424	_M_locked = true;
425	return true;
426      }
427
428    void
429    unlock()
430    {
431      lock_guard<mutex> __lk(_M_mut);
432      __glibcxx_assert( _M_locked );
433      _M_locked = false;
434      _M_cv.notify_one();
435    }
436  };
437
438  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    mutex		_M_mut;
    condition_variable	_M_cv;
    thread::id		_M_owner;     // owning thread, default-constructed id if unlocked
    unsigned		_M_count = 0; // recursion depth, 0 means unlocked

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;

    // Destroying a recursive_timed_mutex that is still locked is a
    // program error.
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Block until this thread can own the mutex, then increment the
    // recursion count.  Throws if the count would overflow.
    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      // All-ones count means another lock would overflow the counter.
      if (_M_count == -1u)
	__throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    // Non-blocking lock attempt; also fails (rather than throwing) if
    // the recursion count is at its maximum.
    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
	return false;
      if (_M_count == -1u)
	return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    // Lock attempt bounded by a relative timeout.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    // Lock attempt bounded by an absolute timeout.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    // Undo one level of locking; release ownership and wake one waiter
    // when the count drops to zero.
    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
	{
	  _M_owner = {};
	  _M_cv.notify_one();
	}
    }
  };
539
540#endif
541#endif // _GLIBCXX_HAS_GTHREADS
542
543  /// @cond undocumented
544  namespace __detail
545  {
546    // Lock the last lockable, after all previous ones are locked.
547    template<typename _Lockable>
548      inline int
549      __try_lock_impl(_Lockable& __l)
550      {
551	if (unique_lock<_Lockable> __lock{__l, try_to_lock})
552	  {
553	    __lock.release();
554	    return -1;
555	  }
556	else
557	  return 0;
558      }
559
560    // Lock each lockable in turn.
561    // Use iteration if all lockables are the same type, recursion otherwise.
    template<typename _L0, typename... _Lockables>
      inline int
      __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
      {
	// Returns -1 if every lockable was locked (ownership passes to the
	// caller), otherwise the 0-based index of the first failure, with
	// no locks held on return.
#if __cplusplus >= 201703L
	if constexpr ((is_same_v<_L0, _Lockables> && ...))
	  {
	    constexpr int _Np = 1 + sizeof...(_Lockables);
	    // Defer locking so failures can be unwound uniformly below.
	    unique_lock<_L0> __locks[_Np] = {
		{__l0, defer_lock}, {__lockables, defer_lock}...
	    };
	    for (int __i = 0; __i < _Np; ++__i)
	      {
		if (!__locks[__i].try_lock())
		  {
		    const int __failed = __i;
		    // Unlock everything acquired so far, in reverse order.
		    while (__i--)
		      __locks[__i].unlock();
		    return __failed;
		  }
	      }
	    // Success: hand ownership of every lock to the caller.
	    for (auto& __l : __locks)
	      __l.release();
	    return -1;
	  }
	else
#endif
	if (unique_lock<_L0> __lock{__l0, try_to_lock})
	  {
	    int __idx = __detail::__try_lock_impl(__lockables...);
	    if (__idx == -1)
	      {
		// The rest locked too: keep __l0 locked as well.
		__lock.release();
		return -1;
	      }
	    // __idx is relative to __lockables; shift by one for __l0.
	    return __idx + 1;
	  }
	else
	  return 0;
      }
602
603  } // namespace __detail
604  /// @endcond
605
606  /** @brief Generic try_lock.
607   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
608   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
609   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
610   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
611   *          a 0-based index corresponding to the argument that returned false.
612   *  @post Either all arguments are locked, or none will be.
613   *
614   *  Sequentially calls try_lock() on each argument.
615   */
616  template<typename _L1, typename _L2, typename... _L3>
617    inline int
618    try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
619    {
620      return __detail::__try_lock_impl(__l1, __l2, __l3...);
621    }
622
623  /// @cond undocumented
624  namespace __detail
625  {
626    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
627    // On each recursion the lockables are rotated left one position,
628    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
629    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
630    // so that l_i is the first argument, and then blocks until l_i is locked.
    template<typename _L0, typename... _L1>
      void
      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
      {
	// __i is the index (in the original argument order) of the lockable
	// to block on next; __depth is how many rotations have been applied.
	// Loop until every lockable is held, signalled by __i == -1.
	while (__i >= __depth)
	  {
	    if (__i == __depth)
	      {
		int __failed = 1; // index that couldn't be locked
		{
		  unique_lock<_L0> __first(__l0);
		  // __try_lock_impl returns -1 when all of __l1... locked,
		  // making __failed zero.
		  __failed += __detail::__try_lock_impl(__l1...);
		  if (!__failed)
		    {
		      __i = -1; // finished
		      __first.release(); // the caller now owns every lock
		      return;
		    }
		  // ~__first releases __l0 before yielding and retrying.
		}
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
		__gthread_yield();
#endif
		// Map __failed back to an index in the original order.
		constexpr auto __n = 1 + sizeof...(_L1);
		__i = (__depth + __failed) % __n;
	      }
	    else // rotate left until l_i is first.
	      __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
	  }
      }
660
661  } // namespace __detail
662  /// @endcond
663
664  /** @brief Generic lock.
665   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
666   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
667   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
668   *  @throw An exception thrown by an argument's lock() or try_lock() member.
669   *  @post All arguments are locked.
670   *
671   *  All arguments are locked via a sequence of calls to lock(), try_lock()
672   *  and unlock().  If this function exits via an exception any locks that
673   *  were obtained will be released.
674   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
#if __cplusplus >= 201703L
      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
	{
	  // All lockables have the same type, so they can be kept in an
	  // array and locked iteratively: block on one, try_lock the rest,
	  // and on failure restart blocking on the one that failed.
	  constexpr int _Np = 2 + sizeof...(_L3);
	  unique_lock<_L1> __locks[] = {
	      {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
	  };
	  int __first = 0;
	  do {
	    __locks[__first].lock();
	    // Try the remaining locks in circular order after __first.
	    for (int __j = 1; __j < _Np; ++__j)
	      {
		const int __idx = (__first + __j) % _Np;
		if (!__locks[__idx].try_lock())
		  {
		    // Release everything obtained this round (in reverse)
		    // and block on the lock that was contended.
		    for (int __k = __j; __k != 0; --__k)
		      __locks[(__first + __k - 1) % _Np].unlock();
		    __first = __idx;
		    break;
		  }
	      }
	  } while (!__locks[__first].owns_lock());

	  // Success: transfer ownership of all locks to the caller.
	  for (auto& __l : __locks)
	    __l.release();
	}
      else
#endif
	{
	  // Heterogeneous lockables: use the recursive implementation.
	  int __i = 0;
	  __detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
	}
    }
712
713#if __cplusplus >= 201703L
714#define __cpp_lib_scoped_lock 201703L
715  /** @brief A scoped lock type for multiple lockable objects.
716   *
717   * A scoped_lock controls mutex ownership within a scope, releasing
718   * ownership in the destructor.
719   *
720   * @headerfile mutex
721   * @since C++17
722   */
723  template<typename... _MutexTypes>
724    class scoped_lock
725    {
726    public:
727      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
728      { std::lock(__m...); }
729
730      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
731      : _M_devices(std::tie(__m...))
732      { } // calling thread owns mutex
733
734      ~scoped_lock()
735      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
736
737      scoped_lock(const scoped_lock&) = delete;
738      scoped_lock& operator=(const scoped_lock&) = delete;
739
740    private:
741      tuple<_MutexTypes&...> _M_devices;
742    };
743
  template<>
    class scoped_lock<>
    {
    public:
      // A scoped_lock over zero mutexes locks and unlocks nothing.
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };
755
756  template<typename _Mutex>
757    class scoped_lock<_Mutex>
758    {
759    public:
760      using mutex_type = _Mutex;
761
762      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
763      { _M_device.lock(); }
764
765      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
766      : _M_device(__m)
767      { } // calling thread owns mutex
768
769      ~scoped_lock()
770      { _M_device.unlock(); }
771
772      scoped_lock(const scoped_lock&) = delete;
773      scoped_lock& operator=(const scoped_lock&) = delete;
774
775    private:
776      mutex_type&  _M_device;
777    };
778#endif // C++17
779
780#ifdef _GLIBCXX_HAS_GTHREADS
781  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    // RAII helper that publishes the callable for __once_proxy to run;
    // defined differently depending on TLS availability.
    struct _Prepare_execution;

    // call_once needs access to _M_once and _Prepare_execution.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
802
803  /// @cond undocumented
804# ifdef _GLIBCXX_HAVE_TLS
805  // If TLS is available use thread-local state for the type-erased callable
806  // that is being run by std::call_once in the current thread.
807  extern __thread void* __once_callable;
808  extern __thread void (*__once_call)();
809
810  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    // Publish __c so that __once_proxy, running in this thread, can
    // invoke it through the thread-local pointers above.
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store address in thread-local pointer:
	__once_callable = std::__addressof(__c);
	// Trampoline function to invoke the closure via thread-local pointer:
	__once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      // Always clear the thread-local state, including when the callable
      // threw, so no dangling pointer to a dead closure is left behind.
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
833
834# else
835  // Without TLS use a global std::mutex and store the callable in a
836  // global std::function.
837  extern function<void()> __once_functor;
838
839  extern void
840  __set_once_functor_lock_ptr(unique_lock<mutex>*);
841
842  extern mutex&
843  __get_once_mutex();
844
845  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    // Publish __c via the global std::function and hand our lock on the
    // global once-mutex to the library (consumed by __once_proxy; exact
    // semantics live in the library's definition of these hooks).
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store the callable in the global std::function
	__once_functor = __c;
	__set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      // If we still own the lock, tell the library to forget the pointer
      // before the unique_lock member releases the mutex.
      if (_M_functor_lock)
	__set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
870# endif
871  /// @endcond
872
873  // This function is passed to pthread_once by std::call_once.
874  // It runs __once_call() or __once_functor().
875  extern "C" void __once_proxy(void);
876
877  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
	  std::__invoke(std::forward<_Callable>(__f),
			std::forward<_Args>(__args)...);
      };

      // Publish __callable where __once_proxy can find it: a TLS pointer,
      // or a global function object on targets without TLS.
      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
	__throw_system_error(__e);
    }
894
895#else // _GLIBCXX_HAS_GTHREADS
896
897  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    // Current state; this is the single-threaded implementation, so no
    // synchronization is used.
    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      // _M_returning is only set after the callable returns normally, so
      // if an exception propagates this records an exceptional execution.
      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      bool _M_returning = false;
    };

    // call_once needs access to the private interface above.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
948
949  // Inline definitions of std::once_flag members for single-threaded targets.
950
  // True once a returning execution has completed: every later call
  // using this flag is a passive execution.
  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }
954
  // Begin an active execution: returns true if the caller should run the
  // callable, false if the flag is already set.  Throws system_error with
  // EDEADLK if called while another active execution is in progress
  // (i.e. call_once re-entered on the same flag in this thread of control).
  inline bool
  once_flag::_M_activate()
  {
    if (_M_once == _Bits::_Init) [[__likely__]]
      {
	_M_once = _Bits::_Active;
	return true;
      }
    else if (_M_passive()) // Caller should have checked this already.
      return false;
    else
      __throw_system_error(EDEADLK);
  }
968
969  inline void
970  once_flag::_M_finish(bool __returning) noexcept
971  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
972
973  /// Invoke a callable and synchronize with other calls using the same flag
974  template<typename _Callable, typename... _Args>
975    inline void
976    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
977    {
978      if (__once._M_passive())
979	return;
980      else if (__once._M_activate())
981	{
982	  once_flag::_Active_execution __exec(__once);
983
984	  // _GLIBCXX_RESOLVE_LIB_DEFECTS
985	  // 2442. call_once() shouldn't DECAY_COPY()
986	  std::__invoke(std::forward<_Callable>(__f),
987			std::forward<_Args>(__args)...);
988
989	  // __f(__args...) did not throw
990	  __exec._M_returning = true;
991	}
992    }
993#endif // _GLIBCXX_HAS_GTHREADS
994
995  /// @} group mutexes
996_GLIBCXX_END_NAMESPACE_VERSION
997} // namespace
998
999#endif // C++11
1000
1001#endif // _GLIBCXX_MUTEX
1002