// -*- C++ -*- header.

// Copyright (C) 2015-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_futex.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_FUTEX_H
#define _GLIBCXX_ATOMIC_FUTEX_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <atomic>
#include <chrono>
#if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
#include <mutex>
#include <condition_variable>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
#if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
  struct __atomic_futex_unsigned_base
  {
    // Returns false iff a timeout occurred.
    bool
    _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns);

    // This can be executed after the object has been destroyed.
    static void _M_futex_notify_all(unsigned* __addr);
  };
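
  // The two members above are defined out of line in the library and wrap
  // the Linux futex system call.  As a rough, hypothetical illustration of
  // the contract -- not the library's actual implementation -- a wait and a
  // wake could look like the sketch below, assuming <unistd.h>,
  // <sys/syscall.h>, <linux/futex.h>, <cerrno> and <climits>:
  //
  //   bool
  //   __sketch_futex_wait(unsigned* __addr, unsigned __val,
  //                       const struct timespec* __rel_timeout) // may be null
  //   {
  //     // Sleeps only while *__addr still equals __val.  FUTEX_WAIT takes a
  //     // relative timeout, so a full implementation of _M_futex_wait_until
  //     // must first convert its absolute deadline to a relative one.
  //     long __rc = syscall(SYS_futex, __addr, FUTEX_WAIT, __val,
  //                         __rel_timeout, nullptr, 0);
  //     return !(__rc == -1 && errno == ETIMEDOUT);  // false iff timed out
  //   }
  //
  //   void
  //   __sketch_futex_notify_all(unsigned* __addr)
  //   { syscall(SYS_futex, __addr, FUTEX_WAKE, INT_MAX, nullptr, 0); }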

  template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned : __atomic_futex_unsigned_base
  {
    typedef chrono::system_clock __clock_t;

    // This must be lock-free and at offset 0.
    atomic<unsigned> _M_data;

  public:
    explicit
    __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
    { }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load(memory_order __mo)
    {
      return _M_data.load(__mo) & ~_Waiter_bit;
    }

  private:
    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    unsigned
    _M_load_and_test_until(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns)
    {
      for (;;)
	{
	  // Don't bother checking the value again because we expect the caller
	  // to have done it recently.
	  // memory_order_relaxed is sufficient because we can rely on just the
	  // modification order (store_notify uses an atomic RMW operation too),
	  // and the futex syscalls synchronize between themselves.
	  _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
	  bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
					   __assumed | _Waiter_bit,
					   __has_timeout, __s, __ns);
	  // Fetch the current value after waiting (clears _Waiter_bit).
	  __assumed = _M_load(__mo);
	  if (!__ret || ((__operand == __assumed) == __equal))
	    return __assumed;
	  // TODO adapt wait time
	}
    }

    // Returns the operand's value if equal is true or a different value if
    // equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    unsigned
    _M_load_and_test(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo)
    {
      return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
				    false, {}, {});
    }

    // If a timeout occurs, returns a current value after the timeout;
    // otherwise, returns the operand's value if equal is true or a different
    // value if equal is false.
    // The assumed value is the caller's assumption about the current value
    // when making the call.
    template<typename _Dur>
    unsigned
    _M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
	bool __equal, memory_order __mo,
	const chrono::time_point<__clock_t, _Dur>& __atime)
    {
      auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
      auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
      // XXX correct?
      return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
	  true, __s.time_since_epoch(), __ns);
    }
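
    // Illustrative values only: a __clock_t deadline of 1000000000.25s since
    // the epoch is split by the code above into __s = 1000000000s
    // (time_point_cast truncates) and __ns = 250000000ns, which is the
    // seconds/nanoseconds pair _M_load_and_test_until expects.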

  public:

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load_when_not_equal(unsigned __val, memory_order __mo)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) != __val)
	return (__i & ~_Waiter_bit);
      // TODO Spin-wait first.
      return _M_load_and_test(__i, __val, false, __mo);
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_load_when_equal(unsigned __val, memory_order __mo)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
	return;
      // TODO Spin-wait first.
      _M_load_and_test(__i, __val, true, __mo);
    }

    // Returns false iff a timeout occurred.
    template<typename _Rep, typename _Period>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_for(unsigned __val, memory_order __mo,
	  const chrono::duration<_Rep, _Period>& __rtime)
      {
	return _M_load_when_equal_until(__val, __mo,
					__clock_t::now() + __rtime);
      }

    // Returns false iff a timeout occurred.
    template<typename _Clock, typename _Duration>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_until(unsigned __val, memory_order __mo,
	  const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __atime - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return _M_load_when_equal_until(__val, __mo, __s_atime);
      }
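
      // For example, a deadline given as steady_clock::now() + 5ms is
      // re-expressed above as __clock_t::now() + 5ms (the remaining delta
      // measured on the caller's clock), so it can be handed to the
      // system_clock overload below.  If the two clocks drift apart after
      // this conversion, the wait may return slightly early or late.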

    // Returns false iff a timeout occurred.
    template<typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, memory_order __mo,
	const chrono::time_point<__clock_t, _Duration>& __atime)
    {
      unsigned __i = _M_load(__mo);
      if ((__i & ~_Waiter_bit) == __val)
	return true;
      // TODO Spin-wait first.  Ignore effect on timeout.
      __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
      return (__i & ~_Waiter_bit) == __val;
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_store_notify_all(unsigned __val, memory_order __mo)
    {
      unsigned* __futex = (unsigned *)(void *)&_M_data;
      if (_M_data.exchange(__val, __mo) & _Waiter_bit)
	_M_futex_notify_all(__futex);
    }
  };
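
  // A minimal usage sketch (hypothetical names, not part of this header):
  // a producer publishes a "ready" value and wakes any blocked waiters,
  // while consumers block until they observe it.  This is essentially the
  // pattern <future> relies on to wait for a shared state to become ready.
  //
  //   __atomic_futex_unsigned<> __state{0};	// 0 = not ready
  //
  //   void __make_ready()
  //   { __state._M_store_notify_all(1, memory_order_release); }
  //
  //   void __wait_ready()
  //   { __state._M_load_when_equal(1, memory_order_acquire); }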

#else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)

  // If futexes are not available, use a mutex and a condvar to wait.
  // Because we access the data only within critical sections, all accesses
  // are sequentially consistent; thus, we satisfy any provided memory_order.
  template <unsigned _Waiter_bit = 0x80000000>
  class __atomic_futex_unsigned
  {
    typedef chrono::system_clock __clock_t;

    unsigned _M_data;
    mutex _M_mutex;
    condition_variable _M_condvar;

  public:
    explicit
    __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
    { }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load(memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      return _M_data;
    }

    _GLIBCXX_ALWAYS_INLINE unsigned
    _M_load_when_not_equal(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      while (_M_data == __val)
	_M_condvar.wait(__lock);
      return _M_data;
    }

    _GLIBCXX_ALWAYS_INLINE void
    _M_load_when_equal(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      while (_M_data != __val)
	_M_condvar.wait(__lock);
    }

    template<typename _Rep, typename _Period>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_for(unsigned __val, memory_order __mo,
	  const chrono::duration<_Rep, _Period>& __rtime)
      {
	unique_lock<mutex> __lock(_M_mutex);
	return _M_condvar.wait_for(__lock, __rtime,
				   [&] { return _M_data == __val;});
      }

    template<typename _Clock, typename _Duration>
      _GLIBCXX_ALWAYS_INLINE bool
      _M_load_when_equal_until(unsigned __val, memory_order __mo,
	  const chrono::time_point<_Clock, _Duration>& __atime)
      {
	unique_lock<mutex> __lock(_M_mutex);
	return _M_condvar.wait_until(__lock, __atime,
				     [&] { return _M_data == __val;});
      }

    _GLIBCXX_ALWAYS_INLINE void
    _M_store_notify_all(unsigned __val, memory_order __mo)
    {
      unique_lock<mutex> __lock(_M_mutex);
      _M_data = __val;
      _M_condvar.notify_all();
    }
  };
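
  // Note that this fallback never uses the _Waiter_bit template parameter:
  // waiters are tracked by the condition variable rather than by a bit in
  // the value.  The public interface matches the futex-based version above,
  // so callers need not care which implementation they get.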

#endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif