// -*- C++ -*- header.

// Copyright (C) 2008-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif

  /// @cond undocumented
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
  /// @endcond

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  /// @cond undocumented

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

  /// @endcond

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

#if __cplusplus >= 202002L
# define __cpp_lib_atomic_value_initialization 201911L
#endif

  /// @cond undocumented
#if __cpp_lib_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif
  /// @endcond

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
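  // Illustrative sketch of the failure-order demotion performed by
  // __cmpexch_failure_order above: a compare-and-exchange may not use
  // memory_order_release or memory_order_acq_rel on the failure path, so
  // those orders are demoted while any HLE modifier bits are preserved.
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire, "");
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed, "");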
  /// @cond undocumented

  /*
   *  Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
  */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

  /// @endcond

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old,
         memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }

    // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait
#endif // C++20

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
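  // Minimal usage sketch (illustrative only; lock_spin/unlock_spin are
  // hypothetical helpers): atomic_flag can implement a simple spin lock via
  // test_and_set/clear, and since C++20 test() allows polling without
  // modifying the flag.
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //   void lock_spin()
  //   { while (lock.test_and_set(std::memory_order_acquire)) { } }
  //   void unlock_spin()
  //   { lock.clear(std::memory_order_release); }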
  /// @cond undocumented

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
                            [__m, this] { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
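  // Usage sketch (illustrative): __atomic_base<_ITp> provides the operations
  // behind std::atomic<integral>, so typical client code looks like
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed);
  //   int expected = 1;
  //   counter.compare_exchange_strong(expected, 2,
  //                                   std::memory_order_acq_rel);
  //   // the failure order is demoted to memory_order_acquire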
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
                                     [__m, this]
                                     { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
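  // Usage sketch (illustrative): the pointer specialization scales its
  // ptrdiff_t argument by sizeof(_PTp) via _M_type_size, matching ordinary
  // pointer arithmetic, e.g.
  //
  //   int buf[4];
  //   std::atomic<int*> p{buf};
  //   p.fetch_add(2);        // stored pointer is now buf + 2
  //   ++p;                   // stored pointer is now buf + 3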
  /// @endcond

#if __cplusplus > 201703L
  /// @cond undocumented

  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }

#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

      // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

      // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
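  // The floating-point fetch-and-modify helpers above have no single GCC
  // built-in, so they retry with a compare-exchange loop.  The equivalent
  // user-level pattern is (a sketch for illustration only; a, delta and
  // order are placeholder names):
  //
  //   double expected = a.load(std::memory_order_relaxed);
  //   while (!a.compare_exchange_weak(expected, expected + delta,
  //                                   order, std::memory_order_relaxed))
  //     { /* expected was refreshed by the failed CAS; retry */ }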
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
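  // Usage sketch (illustrative): __atomic_float is the base of
  // std::atomic<floating-point> in C++20, so e.g.
  //
  //   std::atomic<double> sum{0.0};
  //   sum.fetch_add(1.5);       // implemented with the CAS retry loop above
  //   sum += 2.5;               // likewise, via __add_fetch_flt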
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
          ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
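  // Usage sketch (illustrative; Point is a hypothetical type): this partial
  // specialization backs std::atomic_ref for trivially copyable class types.
  //
  //   struct Point { int x, y; };
  //   alignas(std::atomic_ref<Point>::required_alignment) Point p{1, 2};
  //   std::atomic_ref<Point> ref{p};
  //   ref.store(Point{3, 4});
  //   Point q = ref.load();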
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
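  // Usage sketch (illustrative): std::atomic_ref<int> performs atomic
  // operations on a plain, suitably aligned int that it refers to, e.g.
  //
  //   int counter = 0;
  //   std::atomic_ref<int> ref{counter};
  //   ref.fetch_add(1, std::memory_order_relaxed);
  //   ++ref;                 // counter is now 2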
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }
      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };

  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free
        = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }
      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };

  /// @endcond
#endif // C++20

  /// @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H