xref: /netbsd-src/external/gpl3/gcc/dist/libstdc++-v3/include/bits/atomic_wait.h (revision b1e838363e3c6fc78a55519254d99869742dd33c)
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2020-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library.  This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_wait.h
26  *  This is an internal header file, included by other library headers.
27  *  Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_WAIT_H
31 #define _GLIBCXX_ATOMIC_WAIT_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/c++config.h>
36 #if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
37 #include <bits/functional_hash.h>
38 #include <bits/gthr.h>
39 #include <ext/numeric_traits.h>
40 
41 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
42 # include <cerrno>
43 # include <climits>
44 # include <unistd.h>
45 # include <syscall.h>
46 # include <bits/functexcept.h>
47 #endif
48 
49 # include <bits/std_mutex.h>  // std::mutex, std::__condvar
50 
51 #define __cpp_lib_atomic_wait 201907L
52 
namespace std _GLIBCXX_VISIBILITY(default)
54 {
55 _GLIBCXX_BEGIN_NAMESPACE_VERSION
  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
#define _GLIBCXX_HAVE_PLATFORM_WAIT 1
    // futex(2) operates on a naturally aligned 4-byte integer.
    using __platform_wait_t = int;
    inline constexpr size_t __platform_wait_alignment = 4;
#else
// define _GLIBCXX_HAVE_PLATFORM_WAIT and implement __platform_wait()
// and __platform_notify() if there is a more efficient primitive supported
// by the platform (e.g. __ulock_wait()/__ulock_wake()) which is better than
// a mutex/condvar based wait.
    using __platform_wait_t = uint64_t;
    inline constexpr size_t __platform_wait_alignment
      = __alignof__(__platform_wait_t);
#endif
  } // namespace __detail
72 
73   template<typename _Tp>
74     inline constexpr bool __platform_wait_uses_type
75 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
76       = is_scalar_v<_Tp>
77 	&& ((sizeof(_Tp) == sizeof(__detail::__platform_wait_t))
78 	&& (alignof(_Tp*) >= __detail::__platform_wait_alignment));
79 #else
80       = false;
81 #endif
82 
83   namespace __detail
84   {
85 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
    // Values for the futex(2) operation argument, plus the flag selecting
    // process-private futexes (see the futex(2) man page).
    enum class __futex_wait_flags : int
    {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
      __private_flag = 128,   // FUTEX_PRIVATE_FLAG
#else
      __private_flag = 0,     // private futexes not available
#endif
      __wait = 0,             // FUTEX_WAIT
      __wake = 1,             // FUTEX_WAKE
      __wait_bitset = 9,      // FUTEX_WAIT_BITSET
      __wake_bitset = 10,     // FUTEX_WAKE_BITSET
      __wait_private = __wait | __private_flag,
      __wake_private = __wake | __private_flag,
      __wait_bitset_private = __wait_bitset | __private_flag,
      __wake_bitset_private = __wake_bitset | __private_flag,
      __bitset_match_any = -1 // FUTEX_BITSET_MATCH_ANY
    };
103 
    // Block the calling thread on the futex at __addr while it still holds
    // the value __val.  Returns normally on wakeup, on EAGAIN (the value
    // already changed) and on EINTR (treated as a spurious wakeup — the
    // caller re-checks its predicate and re-waits).  Any other error is
    // reported via std::system_error.
    template<typename _Tp>
      void
      __platform_wait(const _Tp* __addr, __platform_wait_t __val) noexcept
      {
	auto __e = syscall (SYS_futex, static_cast<const void*>(__addr),
			    static_cast<int>(__futex_wait_flags::__wait_private),
			    __val, nullptr);
	if (!__e || errno == EAGAIN)
	  return;
	if (errno != EINTR)
	  __throw_system_error(errno);
      }
116 
    // Wake threads blocked in __platform_wait() on the futex at __addr:
    // all of them when __all is true, otherwise at most one.
    template<typename _Tp>
      void
      __platform_notify(const _Tp* __addr, bool __all) noexcept
      {
	syscall (SYS_futex, static_cast<const void*>(__addr),
		 static_cast<int>(__futex_wait_flags::__wake_private),
		 __all ? INT_MAX : 1);
      }
125 #endif
126 
    // Give up the processor to another runnable thread if the platform
    // provides a way to do so; otherwise a no-op.
    inline void
    __thread_yield() noexcept
    {
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
     __gthread_yield();
#endif
    }
134 
    // Spin-loop hint: on x86 execute PAUSE to reduce power use and
    // pipeline contention while busy-waiting; elsewhere fall back to
    // yielding the processor.
    inline void
    __thread_relax() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#else
      __thread_yield();
#endif
    }
144 
    // Total polling iterations in __atomic_spin(), and how many of those
    // use the cheap __thread_relax() hint before escalating to
    // __thread_yield().
    inline constexpr auto __atomic_spin_count_relax = 12;
    inline constexpr auto __atomic_spin_count = 16;

    // Spin policy used when the caller supplies none: never continue
    // spinning after the fixed-count loop in __atomic_spin().
    struct __default_spin_policy
    {
      bool
      operator()() const noexcept
      { return false; }
    };
154 
155     template<typename _Pred,
156 	     typename _Spin = __default_spin_policy>
157       bool
158       __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept
159       {
160 	for (auto __i = 0; __i < __atomic_spin_count; ++__i)
161 	  {
162 	    if (__pred())
163 	      return true;
164 
165 	    if (__i < __atomic_spin_count_relax)
166 	      __detail::__thread_relax();
167 	    else
168 	      __detail::__thread_yield();
169 	  }
170 
171 	while (__spin())
172 	  {
173 	    if (__pred())
174 	      return true;
175 	  }
176 
177 	return false;
178       }
179 
180     // return true if equal
181     template<typename _Tp>
182       bool __atomic_compare(const _Tp& __a, const _Tp& __b)
183       {
184 	// TODO make this do the correct padding bit ignoring comparison
185 	return __builtin_memcmp(&__a, &__b, sizeof(_Tp)) == 0;
186       }
187 
    // Shared bookkeeping for address-based waits.  A small fixed table of
    // these (see _S_for) is indexed by hashing the waited-on address, so
    // unrelated addresses may share an entry.
    struct __waiter_pool_base
    {
      // Don't use std::hardware_destructive_interference_size here because we
      // don't want the layout of library types to depend on compiler options.
      static constexpr auto _S_align = 64;

      // Count of threads currently blocked in a wait on this entry.
      alignas(_S_align) __platform_wait_t _M_wait = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      mutex _M_mtx;
#endif

      // Version counter bumped by notifiers; used as the wait word when
      // the user's object cannot be waited on directly.
      alignas(_S_align) __platform_wait_t _M_ver = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      __condvar _M_cv;
#endif
      __waiter_pool_base() = default;

      // Register the calling thread as a waiter on this entry.
      void
      _M_enter_wait() noexcept
      { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_SEQ_CST); }

      // Deregister the calling thread.
      void
      _M_leave_wait() noexcept
      { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_RELEASE); }

      // True if some thread is (or may be about to start) waiting here.
      bool
      _M_waiting() const noexcept
      {
	__platform_wait_t __res;
	__atomic_load(&_M_wait, &__res, __ATOMIC_SEQ_CST);
	return __res != 0;
      }

      // Wake waiters blocked on __addr.  When __addr is the internal
      // version counter it is incremented first and all waiters are woken,
      // because they wait for any change of version rather than for a
      // specific value.  For __bare notifications the caller tracks
      // contention itself, so skip the _M_waiting() check.
      void
      _M_notify(__platform_wait_t* __addr, [[maybe_unused]] bool __all,
		bool __bare) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
	if (__addr == &_M_ver)
	  {
	    __atomic_fetch_add(__addr, 1, __ATOMIC_SEQ_CST);
	    __all = true;
	  }

	if (__bare || _M_waiting())
	  __platform_notify(__addr, __all);
#else
	{
	  // Increment under the mutex so _M_do_wait's double-check
	  // cannot miss a concurrent notification.
	  lock_guard<mutex> __l(_M_mtx);
	  __atomic_fetch_add(__addr, 1, __ATOMIC_RELAXED);
	}
	if (__bare || _M_waiting())
	  _M_cv.notify_all();
#endif
      }

      // Map an arbitrary address onto one of a fixed set of pool entries.
      static __waiter_pool_base&
      _S_for(const void* __addr) noexcept
      {
	constexpr uintptr_t __ct = 16;
	static __waiter_pool_base __w[__ct];
	auto __key = (uintptr_t(__addr) >> 2) % __ct;
	return __w[__key];
      }
    };
255 
    // Adds the actual blocking operation on top of __waiter_pool_base.
    struct __waiter_pool : __waiter_pool_base
    {
      // Block until *__addr changes from __old, or a spurious wakeup
      // occurs.  Callers loop around this, re-checking their predicate.
      void
      _M_do_wait(const __platform_wait_t* __addr, __platform_wait_t __old) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
	__platform_wait(__addr, __old);
#else
	__platform_wait_t __val;
	__atomic_load(__addr, &__val, __ATOMIC_SEQ_CST);
	if (__val == __old)
	  {
	    // Re-check under the mutex: notifiers increment *__addr while
	    // holding the same mutex, so a change cannot be missed between
	    // this load and the condvar wait.
	    lock_guard<mutex> __l(_M_mtx);
	    __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
	    if (__val == __old)
	      _M_cv.wait(_M_mtx);
	  }
#endif // _GLIBCXX_HAVE_PLATFORM_WAIT
      }
    };
276 
    // Common state of a single wait/notify operation: binds an address to
    // its pool entry and to the platform wait word used for it.
    template<typename _Tp>
      struct __waiter_base
      {
	using __waiter_type = _Tp;

	__waiter_type& _M_w;          // pool entry for the address
	__platform_wait_t* _M_addr;   // word actually waited on

	// Wait directly on the user's object when its type can use the
	// platform primitive, otherwise on __b (the pool's version
	// counter).
	template<typename _Up>
	  static __platform_wait_t*
	  _S_wait_addr(const _Up* __a, __platform_wait_t* __b)
	  {
	    if constexpr (__platform_wait_uses_type<_Up>)
	      return reinterpret_cast<__platform_wait_t*>(const_cast<_Up*>(__a));
	    else
	      return __b;
	  }

	// Pool entry for __addr, viewed as the derived __waiter_type.
	// The static_assert guarantees the cast does not change size.
	static __waiter_type&
	_S_for(const void* __addr) noexcept
	{
	  static_assert(sizeof(__waiter_type) == sizeof(__waiter_pool_base));
	  auto& res = __waiter_pool_base::_S_for(__addr);
	  return reinterpret_cast<__waiter_type&>(res);
	}

	template<typename _Up>
	  explicit __waiter_base(const _Up* __addr) noexcept
	    : _M_w(_S_for(__addr))
	    , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
	  { }

	void
	_M_notify(bool __all, bool __bare = false) noexcept
	{ _M_w._M_notify(_M_addr, __all, __bare); }

	// Spin until the value produced by __vfn() differs from __old.
	// On a false return, __val holds the value to pass to the blocking
	// wait: __old's bit pattern when waiting on the object itself,
	// otherwise the version counter observed before spinning.
	template<typename _Up, typename _ValFn,
		 typename _Spin = __default_spin_policy>
	  static bool
	  _S_do_spin_v(__platform_wait_t* __addr,
		       const _Up& __old, _ValFn __vfn,
		       __platform_wait_t& __val,
		       _Spin __spin = _Spin{ })
	  {
	    auto const __pred = [=]
	      { return !__detail::__atomic_compare(__old, __vfn()); };

	    if constexpr (__platform_wait_uses_type<_Up>)
	      {
		__builtin_memcpy(&__val, &__old, sizeof(__val));
	      }
	    else
	      {
		__atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
	      }
	    return __atomic_spin(__pred, __spin);
	  }

	template<typename _Up, typename _ValFn,
		 typename _Spin = __default_spin_policy>
	  bool
	  _M_do_spin_v(const _Up& __old, _ValFn __vfn,
		       __platform_wait_t& __val,
		       _Spin __spin = _Spin{ })
	  { return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }

	// Predicate form: spin on __pred, capturing the wait word's
	// current value in __val for a subsequent blocking wait.
	template<typename _Pred,
		 typename _Spin = __default_spin_policy>
	  static bool
	  _S_do_spin(const __platform_wait_t* __addr,
		     _Pred __pred,
		     __platform_wait_t& __val,
		     _Spin __spin = _Spin{ })
	  {
	    __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
	    return __atomic_spin(__pred, __spin);
	  }

	template<typename _Pred,
		 typename _Spin = __default_spin_policy>
	  bool
	  _M_do_spin(_Pred __pred, __platform_wait_t& __val,
		     _Spin __spin = _Spin{ })
	  { return _S_do_spin(_M_addr, __pred, __val, __spin); }
      };
362 
    // A single waiting operation.  _EntersWait selects whether this waiter
    // registers itself in the pool's waiter count (true_type, used by the
    // normal __atomic_wait_address* entry points) or is "bare", i.e. the
    // caller tracks contention externally (false_type).
    template<typename _EntersWait>
      struct __waiter : __waiter_base<__waiter_pool>
      {
	using __base_type = __waiter_base<__waiter_pool>;

	template<typename _Tp>
	  explicit __waiter(const _Tp* __addr) noexcept
	    : __base_type(__addr)
	  {
	    if constexpr (_EntersWait::value)
	      _M_w._M_enter_wait();
	  }

	// Deregister on scope exit (RAII pairing with the constructor).
	~__waiter()
	{
	  if constexpr (_EntersWait::value)
	    _M_w._M_leave_wait();
	}

	// Wait until the value read by __vfn() differs bitwise from __old.
	template<typename _Tp, typename _ValFn>
	  void
	  _M_do_wait_v(_Tp __old, _ValFn __vfn)
	  {
	    do
	      {
		__platform_wait_t __val;
		if (__base_type::_M_do_spin_v(__old, __vfn, __val))
		  return;
		__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
	      }
	    while (__detail::__atomic_compare(__old, __vfn()));
	  }

	// Wait until __pred() returns true.
	template<typename _Pred>
	  void
	  _M_do_wait(_Pred __pred) noexcept
	  {
	    do
	      {
		__platform_wait_t __val;
		if (__base_type::_M_do_spin(__pred, __val))
		  return;
		__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
	      }
	    while (!__pred());
	  }
      };

    // Waiter that counts itself in the pool, and one that does not.
    using __enters_wait = __waiter<std::true_type>;
    using __bare_wait = __waiter<std::false_type>;
413   } // namespace __detail
414 
415   template<typename _Tp, typename _ValFn>
416     void
417     __atomic_wait_address_v(const _Tp* __addr, _Tp __old,
418 			    _ValFn __vfn) noexcept
419     {
420       __detail::__enters_wait __w(__addr);
421       __w._M_do_wait_v(__old, __vfn);
422     }
423 
424   template<typename _Tp, typename _Pred>
425     void
426     __atomic_wait_address(const _Tp* __addr, _Pred __pred) noexcept
427     {
428       __detail::__enters_wait __w(__addr);
429       __w._M_do_wait(__pred);
430     }
431 
  // This call is to be used by atomic types which track contention externally
  template<typename _Pred>
    void
    __atomic_wait_address_bare(const __detail::__platform_wait_t* __addr,
			       _Pred __pred) noexcept
    {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      // Wait directly on __addr with the platform primitive; no pool
      // waiter-count bookkeeping since the caller tracks contention.
      do
	{
	  __detail::__platform_wait_t __val;
	  if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
	    return;
	  __detail::__platform_wait(__addr, __val);
	}
      while (!__pred());
#else // !_GLIBCXX_HAVE_PLATFORM_WAIT
      // No platform primitive: fall back to a pool-based bare waiter.
      __detail::__bare_wait __w(__addr);
      __w._M_do_wait(__pred);
#endif
    }
452 
453   template<typename _Tp>
454     void
455     __atomic_notify_address(const _Tp* __addr, bool __all) noexcept
456     {
457       __detail::__bare_wait __w(__addr);
458       __w._M_notify(__all);
459     }
460 
  // This call is to be used by atomic types which track contention externally
  inline void
  __atomic_notify_address_bare(const __detail::__platform_wait_t* __addr,
			       bool __all) noexcept
  {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
    __detail::__platform_notify(__addr, __all);
#else
    // No platform primitive: notify through the pool entry for __addr,
    // skipping the waiter-count check (__bare == true).
    __detail::__bare_wait __w(__addr);
    __w._M_notify(__all, true);
#endif
  }
473 _GLIBCXX_END_NAMESPACE_VERSION
474 } // namespace std
475 #endif // GTHREADS || LINUX_FUTEX
476 #endif // _GLIBCXX_ATOMIC_WAIT_H
477