// <stop_token> -*- C++ -*-

// Copyright (C) 2019-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/stop_token
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_STOP_TOKEN
#define _GLIBCXX_STOP_TOKEN

// The stop_token facilities are C++20 components.
#if __cplusplus > 201703L

#include <atomic>
#include <bits/std_thread.h>

#include <semaphore>

// Feature-test macro for the cooperative-cancellation facilities.
#define __cpp_lib_jthread 201911L

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// Tag type indicating a stop_source should have no shared-stop-state.
  struct nostopstate_t { explicit nostopstate_t() = default; };

  /// Tag object used to construct a stop_source with no shared-stop-state.
  inline constexpr nostopstate_t nostopstate{};

  class stop_source;

  /// Allow testing whether a stop request has been made on a `stop_source`.
  class stop_token
  {
  public:
    /// A default-constructed token has no shared-stop-state.
    stop_token() noexcept = default;

    stop_token(const stop_token&) noexcept = default;
    stop_token(stop_token&&) noexcept = default;

    ~stop_token() = default;

    stop_token&
    operator=(const stop_token&) noexcept = default;

    stop_token&
    operator=(stop_token&&) noexcept = default;

    /// True if a stop request has been made, or still could be made.
    [[nodiscard]]
    bool
    stop_possible() const noexcept
    {
      return static_cast<bool>(_M_state) && _M_state->_M_stop_possible();
    }

    /// True if a stop request has already been made on the shared state.
    [[nodiscard]]
    bool
    stop_requested() const noexcept
    {
      return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
    }

    void
    swap(stop_token& __rhs) noexcept
    { _M_state.swap(__rhs._M_state); }

    /// True if both tokens refer to the same stop-state (or both to none).
    [[nodiscard]]
    friend bool
    operator==(const stop_token& __a, const stop_token& __b)
    { return __a._M_state == __b._M_state; }

    friend void
    swap(stop_token& __lhs, stop_token& __rhs) noexcept
    { __lhs.swap(__rhs); }

  private:
    friend class stop_source;
    template<typename _Callback>
      friend class stop_callback;

    // Back off while spin-waiting: pause the core (on x86) then yield.
    static void
    _S_yield() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#endif
      this_thread::yield();
    }

#ifndef __cpp_lib_semaphore
    // Fallback used when <semaphore> is not available: a minimal
    // spin-based binary semaphore built on atomic<int>.
    struct binary_semaphore
    {
      explicit binary_semaphore(int __d) : _M_counter(__d > 0) { }

      void release() { _M_counter.fetch_add(1, memory_order::release); }

      void acquire()
      {
        int __old = 1;
        // Spin until the counter can be swapped from 1 to 0.
        while (!_M_counter.compare_exchange_weak(__old, 0,
                                                 memory_order::acquire,
                                                 memory_order::relaxed))
          {
            __old = 1;
            _S_yield();
          }
      }

      atomic<int> _M_counter;
    };
#endif

    // A node in the stop-state's intrusive doubly-linked list of callbacks.
    // stop_callback stores its wrapper object as a type derived from this.
    struct _Stop_cb
    {
      using __cb_type = void(_Stop_cb*) noexcept;
      __cb_type* _M_callback;       // Type-erased invoker for the callback.
      _Stop_cb* _M_prev = nullptr;
      _Stop_cb* _M_next = nullptr;
      // Points at a bool on the stack of the thread running the callback;
      // set to true if the callback destroys its own stop_callback object
      // while running (see _M_request_stop / _M_remove_callback).
      bool* _M_destroyed = nullptr;
      // Released when the callback finishes, so a different thread
      // destroying the stop_callback can wait for completion.
      binary_semaphore _M_done{0};

      [[__gnu__::__nonnull__]]
      explicit
      _Stop_cb(__cb_type* __cb)
      : _M_callback(__cb)
      { }

      void _M_run() noexcept { _M_callback(this); }
    };

    // The reference-counted stop-state shared by associated stop_token,
    // stop_source and stop_callback objects.
    struct _Stop_state_t
    {
      using value_type = uint32_t;
      // Layout of _M_value:
      //   bit 0: set once a stop request has been made (never cleared),
      //   bit 1: spin-lock bit protecting the _M_head callback list,
      //   bits 2 and up: count of associated stop_source objects.
      static constexpr value_type _S_stop_requested_bit = 1;
      static constexpr value_type _S_locked_bit = 2;
      static constexpr value_type _S_ssrc_counter_inc = 4;

      // Number of objects sharing ownership of *this; controls deletion.
      std::atomic<value_type> _M_owners{1};
      std::atomic<value_type> _M_value{_S_ssrc_counter_inc};
      _Stop_cb* _M_head = nullptr;      // Registered callbacks (guarded by lock bit).
      std::thread::id _M_requester;     // Thread that made the stop request.

      _Stop_state_t() = default;

      bool
      _M_stop_possible() noexcept
      {
        // true if a stop request has already been made or there are still
        // stop_source objects that would allow one to be made.
        return _M_value.load(memory_order::acquire) & ~_S_locked_bit;
      }

      bool
      _M_stop_requested() noexcept
      {
        return _M_value.load(memory_order::acquire) & _S_stop_requested_bit;
      }

      void
      _M_add_owner() noexcept
      {
        // Relaxed: taking an additional reference needs no synchronization.
        _M_owners.fetch_add(1, memory_order::relaxed);
      }

      void
      _M_release_ownership() noexcept
      {
        // acq_rel so the thread that deletes *this observes all prior
        // accesses made by other owners before they dropped their refs.
        if (_M_owners.fetch_sub(1, memory_order::acq_rel) == 1)
          delete this;
      }

      // Increment the stop_source count (bits 2+ of _M_value).
      void
      _M_add_ssrc() noexcept
      {
        _M_value.fetch_add(_S_ssrc_counter_inc, memory_order::relaxed);
      }

      // Decrement the stop_source count.
      void
      _M_sub_ssrc() noexcept
      {
        _M_value.fetch_sub(_S_ssrc_counter_inc, memory_order::release);
      }

      // Obtain lock.
      void
      _M_lock() noexcept
      {
        // Can use relaxed loads to get the current value.
        // The successful call to _M_try_lock is an acquire operation.
        auto __old = _M_value.load(memory_order::relaxed);
        while (!_M_try_lock(__old, memory_order::relaxed))
          { }
      }

      // Precondition: calling thread holds the lock.
      void
      _M_unlock() noexcept
      {
        _M_value.fetch_sub(_S_locked_bit, memory_order::release);
      }

      // Make a stop request and run all registered callbacks on this
      // thread.  Returns false if a request had already been made.
      bool
      _M_request_stop() noexcept
      {
        // obtain lock and set stop_requested bit
        auto __old = _M_value.load(memory_order::acquire);
        do
          {
            if (__old & _S_stop_requested_bit) // stop request already made
              return false;
          }
        while (!_M_try_lock_and_stop(__old));

        _M_requester = this_thread::get_id();

        // Pop and run each callback; the lock is dropped around each run
        // so callbacks may themselves register/unregister callbacks.
        while (_M_head)
          {
            bool __last_cb;
            _Stop_cb* __cb = _M_head;
            _M_head = _M_head->_M_next;
            if (_M_head)
              {
                _M_head->_M_prev = nullptr;
                __last_cb = false;
              }
            else
              __last_cb = true;

            // Allow other callbacks to be unregistered while __cb runs.
            _M_unlock();

            bool __destroyed = false;
            __cb->_M_destroyed = &__destroyed;

            // run callback
            __cb->_M_run();

            // If the callback destroyed its own stop_callback object then
            // *__cb no longer exists and must not be touched.
            if (!__destroyed)
              {
                __cb->_M_destroyed = nullptr;

                // synchronize with destructor of stop_callback that owns *__cb
                if (!__gnu_cxx::__is_single_threaded())
                  __cb->_M_done.release();
              }

            // Avoid relocking if we already know there are no more callbacks.
            if (__last_cb)
              return true;

            _M_lock();
          }

        _M_unlock();
        return true;
      }

      // Add __cb to the callback list.  Returns true if registered, false
      // if it was run immediately (stop already requested) or if no stop
      // request can ever be made.
      [[__gnu__::__nonnull__]]
      bool
      _M_register_callback(_Stop_cb* __cb) noexcept
      {
        auto __old = _M_value.load(memory_order::acquire);
        do
          {
            if (__old & _S_stop_requested_bit) // stop request already made
              {
                __cb->_M_run(); // run synchronously
                return false;
              }

            if (__old < _S_ssrc_counter_inc) // no stop_source owns *this
              // No need to register callback if no stop request can be made.
              // Returning false also means the stop_callback does not share
              // ownership of this state, but that's not observable.
              return false;
          }
        while (!_M_try_lock(__old));

        // Prepend to the list while holding the lock.
        __cb->_M_next = _M_head;
        if (_M_head)
          {
            _M_head->_M_prev = __cb;
          }
        _M_head = __cb;
        _M_unlock();
        return true;
      }

      // Called by ~stop_callback just before destroying *__cb.
      [[__gnu__::__nonnull__]]
      void
      _M_remove_callback(_Stop_cb* __cb)
      {
        _M_lock();

        // Easy cases: the callback is still linked in the list.
        if (__cb == _M_head)
          {
            _M_head = _M_head->_M_next;
            if (_M_head)
              _M_head->_M_prev = nullptr;
            _M_unlock();
            return;
          }
        else if (__cb->_M_prev)
          {
            __cb->_M_prev->_M_next = __cb->_M_next;
            if (__cb->_M_next)
              __cb->_M_next->_M_prev = __cb->_M_prev;
            _M_unlock();
            return;
          }

        _M_unlock();

        // Callback is not in the list, so must have been removed by a call to
        // _M_request_stop.

        // Despite appearances there is no data race on _M_requester. The only
        // write to it happens before the callback is removed from the list,
        // and removing it from the list happens before this read.
        if (!(_M_requester == this_thread::get_id()))
          {
            // Synchronize with completion of callback.
            __cb->_M_done.acquire();
            // Safe for ~stop_callback to destroy *__cb now.
            return;
          }

        // The callback is running on this thread and is destroying its own
        // stop_callback: tell _M_request_stop not to touch *__cb again.
        if (__cb->_M_destroyed)
          *__cb->_M_destroyed = true;
      }

      // Try to obtain the lock.
      // Returns true if the lock is acquired (with memory order acquire).
      // Otherwise, sets __curval = _M_value.load(__failure) and returns false.
      // Might fail spuriously, so must be called in a loop.
      bool
      _M_try_lock(value_type& __curval,
                  memory_order __failure = memory_order::acquire) noexcept
      {
        return _M_do_try_lock(__curval, 0, memory_order::acquire, __failure);
      }

      // Try to obtain the lock to make a stop request.
      // Returns true if the lock is acquired and the _S_stop_requested_bit is
      // set (with memory order acq_rel so that other threads see the request).
      // Otherwise, sets __curval = _M_value.load(memory_order::acquire) and
      // returns false.
      // Might fail spuriously, so must be called in a loop.
      bool
      _M_try_lock_and_stop(value_type& __curval) noexcept
      {
        return _M_do_try_lock(__curval, _S_stop_requested_bit,
                              memory_order::acq_rel, memory_order::acquire);
      }

      // Common implementation for the two try-lock operations above:
      // attempt a single CAS setting the lock bit plus __newbits.
      bool
      _M_do_try_lock(value_type& __curval, value_type __newbits,
                     memory_order __success, memory_order __failure) noexcept
      {
        if (__curval & _S_locked_bit)
          {
            // Lock held by another thread: back off, reload, report failure.
            _S_yield();
            __curval = _M_value.load(__failure);
            return false;
          }
        __newbits |= _S_locked_bit;
        return _M_value.compare_exchange_weak(__curval, __curval | __newbits,
                                              __success, __failure);
      }
    };

    // Intrusive reference-counted handle to a _Stop_state_t.
    struct _Stop_state_ref
    {
      _Stop_state_ref() = default;

      // Allocate a new stop-state.  The stop_source argument is only a
      // tag selecting this constructor and is never accessed.
      [[__gnu__::__access__(__none__, 2)]]
      explicit
      _Stop_state_ref(const stop_source&)
      : _M_ptr(new _Stop_state_t())
      { }

      _Stop_state_ref(const _Stop_state_ref& __other) noexcept
      : _M_ptr(__other._M_ptr)
      {
        if (_M_ptr)
          _M_ptr->_M_add_owner();
      }

      _Stop_state_ref(_Stop_state_ref&& __other) noexcept
      : _M_ptr(__other._M_ptr)
      {
        __other._M_ptr = nullptr;
      }

      _Stop_state_ref&
      operator=(const _Stop_state_ref& __other) noexcept
      {
        if (auto __ptr = __other._M_ptr; __ptr != _M_ptr)
          {
            // Acquire the new reference before releasing the old one.
            if (__ptr)
              __ptr->_M_add_owner();
            if (_M_ptr)
              _M_ptr->_M_release_ownership();
            _M_ptr = __ptr;
          }
        return *this;
      }

      _Stop_state_ref&
      operator=(_Stop_state_ref&& __other) noexcept
      {
        _Stop_state_ref(std::move(__other)).swap(*this);
        return *this;
      }

      ~_Stop_state_ref()
      {
        if (_M_ptr)
          _M_ptr->_M_release_ownership();
      }

      void
      swap(_Stop_state_ref& __other) noexcept
      { std::swap(_M_ptr, __other._M_ptr); }

      explicit operator bool() const noexcept { return _M_ptr != nullptr; }

      _Stop_state_t* operator->() const noexcept { return _M_ptr; }

#if __cpp_impl_three_way_comparison >= 201907L
      friend bool
      operator==(const _Stop_state_ref&, const _Stop_state_ref&) = default;
#else
      friend bool
      operator==(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
      noexcept
      { return __lhs._M_ptr == __rhs._M_ptr; }

      friend bool
      operator!=(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
      noexcept
      { return __lhs._M_ptr != __rhs._M_ptr; }
#endif

    private:
      _Stop_state_t* _M_ptr = nullptr;
    };

    _Stop_state_ref _M_state;

    // Used by stop_source::get_token to make a token sharing its state.
    explicit
    stop_token(const _Stop_state_ref& __state) noexcept
    : _M_state{__state}
    { }
  };

  /// A type that allows a stop request to be made.
  class stop_source
  {
  public:
    /// Construct a stop_source with a new shared-stop-state.
    stop_source() : _M_state(*this)
    { }

    /// Construct a stop_source with no shared-stop-state.
    explicit stop_source(std::nostopstate_t) noexcept
    { }

    stop_source(const stop_source& __other) noexcept
    : _M_state(__other._M_state)
    {
      // A copy is an additional stop_source for the same state.
      if (_M_state)
        _M_state->_M_add_ssrc();
    }

    stop_source(stop_source&&) noexcept = default;

    stop_source&
    operator=(const stop_source& __other) noexcept
    {
      if (_M_state != __other._M_state)
        {
          // __sink takes the old state and decrements its stop_source
          // count when it goes out of scope.
          stop_source __sink(std::move(*this));
          _M_state = __other._M_state;
          if (_M_state)
            _M_state->_M_add_ssrc();
        }
      return *this;
    }

    stop_source&
    operator=(stop_source&&) noexcept = default;

    ~stop_source()
    {
      if (_M_state)
        _M_state->_M_sub_ssrc();
    }

    /// True if this source is associated with a shared-stop-state.
    [[nodiscard]]
    bool
    stop_possible() const noexcept
    {
      return static_cast<bool>(_M_state);
    }

    /// True if a stop request has been made on the associated state.
    [[nodiscard]]
    bool
    stop_requested() const noexcept
    {
      return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
    }

    /// Make a stop request, running registered callbacks.  Returns false
    /// if there is no stop-state or a request was already made.
    bool
    request_stop() const noexcept
    {
      if (stop_possible())
        return _M_state->_M_request_stop();
      return false;
    }

    /// Obtain a stop_token associated with this source's stop-state.
    [[nodiscard]]
    stop_token
    get_token() const noexcept
    {
      return stop_token{_M_state};
    }

    void
    swap(stop_source& __other) noexcept
    {
      _M_state.swap(__other._M_state);
    }

    /// True if both sources refer to the same stop-state (or both to none).
    [[nodiscard]]
    friend bool
    operator==(const stop_source& __a, const stop_source& __b) noexcept
    {
      return __a._M_state == __b._M_state;
    }

    friend void
    swap(stop_source& __lhs, stop_source& __rhs) noexcept
    {
      __lhs.swap(__rhs);
    }

  private:
    stop_token::_Stop_state_ref _M_state;
  };

  /// A wrapper for callbacks to be run when a stop request is made.
  template<typename _Callback>
    class [[nodiscard]] stop_callback
    {
      static_assert(is_nothrow_destructible_v<_Callback>);
      static_assert(is_invocable_v<_Callback>);

    public:
      using callback_type = _Callback;

      // Register the callback with __token's stop-state (if any).  If a
      // stop request has already been made, the callback runs synchronously
      // on this thread before the constructor returns (and _M_state stays
      // empty).
      template<typename _Cb,
               enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0>
        explicit
        stop_callback(const stop_token& __token, _Cb&& __cb)
        noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
        : _M_cb(std::forward<_Cb>(__cb))
        {
          if (auto __state = __token._M_state)
            {
              if (__state->_M_register_callback(&_M_cb))
                _M_state.swap(__state);
            }
        }

      template<typename _Cb,
               enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0>
        explicit
        stop_callback(stop_token&& __token, _Cb&& __cb)
        noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
        : _M_cb(std::forward<_Cb>(__cb))
        {
          // Bind by reference and swap, taking the rvalue token's state
          // instead of copying it.
          if (auto& __state = __token._M_state)
            {
              if (__state->_M_register_callback(&_M_cb))
                _M_state.swap(__state);
            }
        }

      // Unregister the callback.  _M_remove_callback blocks if the
      // callback is currently running on another thread.
      ~stop_callback()
      {
        if (_M_state)
          {
            _M_state->_M_remove_callback(&_M_cb);
          }
      }

      stop_callback(const stop_callback&) = delete;
      stop_callback& operator=(const stop_callback&) = delete;
      stop_callback(stop_callback&&) = delete;
      stop_callback& operator=(stop_callback&&) = delete;

    private:
      // The list node registered with the stop-state; holds the user's
      // callback object.
      struct _Cb_impl : stop_token::_Stop_cb
      {
        template<typename _Cb>
          explicit
          _Cb_impl(_Cb&& __cb)
          : _Stop_cb(&_S_execute),
            _M_cb(std::forward<_Cb>(__cb))
          { }

        _Callback _M_cb;

        // Type-erased entry point stored in _Stop_cb::_M_callback:
        // downcast the node and invoke the stored callback.
        [[__gnu__::__nonnull__]]
        static void
        _S_execute(_Stop_cb* __that) noexcept
        {
          _Callback& __cb = static_cast<_Cb_impl*>(__that)->_M_cb;
          std::forward<_Callback>(__cb)();
        }
      };

      _Cb_impl _M_cb;
      stop_token::_Stop_state_ref _M_state;
    };

  template<typename _Callback>
    stop_callback(stop_token, _Callback) -> stop_callback<_Callback>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // __cplusplus > 201703L
#endif // _GLIBCXX_STOP_TOKEN