1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2008-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library.  This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_base.h
26  *  This is an internal header file, included by other library headers.
27  *  Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_BASE_H
31 #define _GLIBCXX_ATOMIC_BASE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/c++config.h>
36 #include <stdint.h>
37 #include <bits/atomic_lockfree_defines.h>
38 #include <bits/move.h>
39 
40 #if __cplusplus > 201703L && _GLIBCXX_HOSTED
41 #include <bits/atomic_wait.h>
42 #endif
43 
44 #ifndef _GLIBCXX_ALWAYS_INLINE
45 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
46 #endif
47 
48 namespace std _GLIBCXX_VISIBILITY(default)
49 {
50 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 
52   /**
53    * @defgroup atomics Atomics
54    *
55    * Components for performing atomic operations.
56    * @{
57    */
58 
59   /// Enumeration for memory_order
60 #if __cplusplus > 201703L
61   enum class memory_order : int
62     {
63       relaxed,
64       consume,
65       acquire,
66       release,
67       acq_rel,
68       seq_cst
69     };
70 
71   inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
72   inline constexpr memory_order memory_order_consume = memory_order::consume;
73   inline constexpr memory_order memory_order_acquire = memory_order::acquire;
74   inline constexpr memory_order memory_order_release = memory_order::release;
75   inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
76   inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
77 #else
78   typedef enum memory_order
79     {
80       memory_order_relaxed,
81       memory_order_consume,
82       memory_order_acquire,
83       memory_order_release,
84       memory_order_acq_rel,
85       memory_order_seq_cst
86     } memory_order;
87 #endif
88 
89   enum __memory_order_modifier
90     {
91       __memory_order_mask          = 0x0ffff,
92       __memory_order_modifier_mask = 0xffff0000,
93       __memory_order_hle_acquire   = 0x10000,
94       __memory_order_hle_release   = 0x20000
95     };
96 
97   constexpr memory_order
98   operator|(memory_order __m, __memory_order_modifier __mod)
99   {
100     return memory_order(int(__m) | int(__mod));
101   }
102 
103   constexpr memory_order
104   operator&(memory_order __m, __memory_order_modifier __mod)
105   {
106     return memory_order(int(__m) & int(__mod));
107   }
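
  // Illustrative note (not part of the upstream header): a base order can be
  // combined with one of the HLE modifier bits above using these operators,
  // and the base order recovered with __memory_order_mask.  For example:
  //
  //   constexpr memory_order __o
  //     = memory_order_acquire | __memory_order_hle_acquire;
  //   static_assert((__o & __memory_order_mask) == memory_order_acquire);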
108 
109   // Drop release ordering as per [atomics.types.operations.req]/21
110   constexpr memory_order
111   __cmpexch_failure_order2(memory_order __m) noexcept
112   {
113     return __m == memory_order_acq_rel ? memory_order_acquire
114       : __m == memory_order_release ? memory_order_relaxed : __m;
115   }
116 
117   constexpr memory_order
118   __cmpexch_failure_order(memory_order __m) noexcept
119   {
120     return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
121       | __memory_order_modifier(__m & __memory_order_modifier_mask));
122   }
123 
124   constexpr bool
125   __is_valid_cmpexch_failure_order(memory_order __m) noexcept
126   {
127     return (__m & __memory_order_mask) != memory_order_release
128 	&& (__m & __memory_order_mask) != memory_order_acq_rel;
129   }
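
  // Illustrative only (not part of the upstream header): how the helpers
  // above behave for the plain, unmodified orders.
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire);
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed);
  //   static_assert(__is_valid_cmpexch_failure_order(memory_order_acquire));
  //   static_assert(!__is_valid_cmpexch_failure_order(memory_order_release));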
130 
131   _GLIBCXX_ALWAYS_INLINE void
132   atomic_thread_fence(memory_order __m) noexcept
133   { __atomic_thread_fence(int(__m)); }
134 
135   _GLIBCXX_ALWAYS_INLINE void
136   atomic_signal_fence(memory_order __m) noexcept
137   { __atomic_signal_fence(int(__m)); }
138 
139   /// kill_dependency
140   template<typename _Tp>
141     inline _Tp
142     kill_dependency(_Tp __y) noexcept
143     {
144       _Tp __ret(__y);
145       return __ret;
146     }
147 
148   // Base types for atomics.
149   template<typename _IntTp>
150     struct __atomic_base;
151 
152 #if __cplusplus <= 201703L
153 # define _GLIBCXX20_INIT(I)
154 #else
155 # define __cpp_lib_atomic_value_initialization 201911L
156 # define _GLIBCXX20_INIT(I) = I
157 #endif
158 
159 #define ATOMIC_VAR_INIT(_VI) { _VI }
160 
161   template<typename _Tp>
162     struct atomic;
163 
164   template<typename _Tp>
165     struct atomic<_Tp*>;
166 
167   /* The target's "set" value for test-and-set may not be exactly 1.  */
168 #if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
169   typedef bool __atomic_flag_data_type;
170 #else
171   typedef unsigned char __atomic_flag_data_type;
172 #endif
173 
174   /**
175    *  @brief Base type for atomic_flag.
176    *
177    *  Base type is POD with data, allowing atomic_flag to derive from
178    *  it and meet the standard layout type requirement. In addition to
179    *  compatibility with a C interface, this allows different
180    *  implementations of atomic_flag to use the same atomic operation
181    *  functions, via a standard conversion to the __atomic_flag_base
182    *  argument.
183   */
184   _GLIBCXX_BEGIN_EXTERN_C
185 
186   struct __atomic_flag_base
187   {
188     __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
189   };
190 
191   _GLIBCXX_END_EXTERN_C
192 
193 #define ATOMIC_FLAG_INIT { 0 }
194 
195   /// atomic_flag
196   struct atomic_flag : public __atomic_flag_base
197   {
198     atomic_flag() noexcept = default;
199     ~atomic_flag() noexcept = default;
200     atomic_flag(const atomic_flag&) = delete;
201     atomic_flag& operator=(const atomic_flag&) = delete;
202     atomic_flag& operator=(const atomic_flag&) volatile = delete;
203 
204     // Constructor for use with ATOMIC_FLAG_INIT; maps bool to the
204     // target's test-and-set value.
205     constexpr atomic_flag(bool __i) noexcept
206       : __atomic_flag_base{ _S_init(__i) }
207     { }
208 
209     _GLIBCXX_ALWAYS_INLINE bool
210     test_and_set(memory_order __m = memory_order_seq_cst) noexcept
211     {
212       return __atomic_test_and_set (&_M_i, int(__m));
213     }
214 
215     _GLIBCXX_ALWAYS_INLINE bool
216     test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
217     {
218       return __atomic_test_and_set (&_M_i, int(__m));
219     }
220 
221 #if __cplusplus > 201703L
222 #define __cpp_lib_atomic_flag_test 201907L
223 
224     _GLIBCXX_ALWAYS_INLINE bool
225     test(memory_order __m = memory_order_seq_cst) const noexcept
226     {
227       __atomic_flag_data_type __v;
228       __atomic_load(&_M_i, &__v, int(__m));
229       return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
230     }
231 
232     _GLIBCXX_ALWAYS_INLINE bool
233     test(memory_order __m = memory_order_seq_cst) const volatile noexcept
234     {
235       __atomic_flag_data_type __v;
236       __atomic_load(&_M_i, &__v, int(__m));
237       return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
238     }
239 
240 #if __cpp_lib_atomic_wait
241     _GLIBCXX_ALWAYS_INLINE void
242     wait(bool __old,
243 	memory_order __m = memory_order_seq_cst) const noexcept
244     {
245       const __atomic_flag_data_type __v
246 	= __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
247 
248       std::__atomic_wait_address_v(&_M_i, __v,
249 	  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
250     }
251 
252     // TODO add const volatile overload
253 
254     _GLIBCXX_ALWAYS_INLINE void
255     notify_one() noexcept
256     { std::__atomic_notify_address(&_M_i, false); }
257 
258     // TODO add const volatile overload
259 
260     _GLIBCXX_ALWAYS_INLINE void
261     notify_all() noexcept
262     { std::__atomic_notify_address(&_M_i, true); }
263 
264     // TODO add const volatile overload
265 #endif // __cpp_lib_atomic_wait
266 #endif // C++20
267 
268     _GLIBCXX_ALWAYS_INLINE void
269     clear(memory_order __m = memory_order_seq_cst) noexcept
270     {
271       memory_order __b __attribute__ ((__unused__))
272 	= __m & __memory_order_mask;
273       __glibcxx_assert(__b != memory_order_consume);
274       __glibcxx_assert(__b != memory_order_acquire);
275       __glibcxx_assert(__b != memory_order_acq_rel);
276 
277       __atomic_clear (&_M_i, int(__m));
278     }
279 
280     _GLIBCXX_ALWAYS_INLINE void
281     clear(memory_order __m = memory_order_seq_cst) volatile noexcept
282     {
283       memory_order __b __attribute__ ((__unused__))
284 	= __m & __memory_order_mask;
285       __glibcxx_assert(__b != memory_order_consume);
286       __glibcxx_assert(__b != memory_order_acquire);
287       __glibcxx_assert(__b != memory_order_acq_rel);
288 
289       __atomic_clear (&_M_i, int(__m));
290     }
291 
292   private:
293     static constexpr __atomic_flag_data_type
294     _S_init(bool __i)
295     { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
296   };
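
  // Illustrative sketch (not part of the upstream header): the canonical use
  // of atomic_flag is a spin lock built from test_and_set/clear.  The names
  // __spin_lock, lock and unlock below are hypothetical.
  //
  //   struct __spin_lock
  //   {
  //     std::atomic_flag _M_flag = ATOMIC_FLAG_INIT;
  //
  //     void lock() noexcept
  //     {
  //       // Loop until the previous value was "clear".
  //       while (_M_flag.test_and_set(std::memory_order_acquire))
  //         { }
  //     }
  //
  //     void unlock() noexcept
  //     { _M_flag.clear(std::memory_order_release); }
  //   };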
297 
298 
299   /// Base class for atomic integrals.
300   //
301   // For each of the integral types, define atomic_[integral type] struct
302   //
303   // atomic_bool     bool
304   // atomic_char     char
305   // atomic_schar    signed char
306   // atomic_uchar    unsigned char
307   // atomic_short    short
308   // atomic_ushort   unsigned short
309   // atomic_int      int
310   // atomic_uint     unsigned int
311   // atomic_long     long
312   // atomic_ulong    unsigned long
313   // atomic_llong    long long
314   // atomic_ullong   unsigned long long
315   // atomic_char8_t  char8_t
316   // atomic_char16_t char16_t
317   // atomic_char32_t char32_t
318   // atomic_wchar_t  wchar_t
319   //
320   // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
321   // 8 bytes, since that is what GCC built-in functions for atomic
322   // memory access expect.
323   template<typename _ITp>
324     struct __atomic_base
325     {
326       using value_type = _ITp;
327       using difference_type = value_type;
328 
329     private:
330       typedef _ITp 	__int_type;
331 
332       static constexpr int _S_alignment =
333 	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
334 
335       alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
336 
337     public:
338       __atomic_base() noexcept = default;
339       ~__atomic_base() noexcept = default;
340       __atomic_base(const __atomic_base&) = delete;
341       __atomic_base& operator=(const __atomic_base&) = delete;
342       __atomic_base& operator=(const __atomic_base&) volatile = delete;
343 
344       // Requires __int_type convertible to _M_i.
345       constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
346 
347       operator __int_type() const noexcept
348       { return load(); }
349 
350       operator __int_type() const volatile noexcept
351       { return load(); }
352 
353       __int_type
354       operator=(__int_type __i) noexcept
355       {
356 	store(__i);
357 	return __i;
358       }
359 
360       __int_type
361       operator=(__int_type __i) volatile noexcept
362       {
363 	store(__i);
364 	return __i;
365       }
366 
367       __int_type
368       operator++(int) noexcept
369       { return fetch_add(1); }
370 
371       __int_type
372       operator++(int) volatile noexcept
373       { return fetch_add(1); }
374 
375       __int_type
376       operator--(int) noexcept
377       { return fetch_sub(1); }
378 
379       __int_type
380       operator--(int) volatile noexcept
381       { return fetch_sub(1); }
382 
383       __int_type
384       operator++() noexcept
385       { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
386 
387       __int_type
388       operator++() volatile noexcept
389       { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
390 
391       __int_type
392       operator--() noexcept
393       { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
394 
395       __int_type
396       operator--() volatile noexcept
397       { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
398 
399       __int_type
400       operator+=(__int_type __i) noexcept
401       { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
402 
403       __int_type
404       operator+=(__int_type __i) volatile noexcept
405       { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
406 
407       __int_type
408       operator-=(__int_type __i) noexcept
409       { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
410 
411       __int_type
412       operator-=(__int_type __i) volatile noexcept
413       { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
414 
415       __int_type
416       operator&=(__int_type __i) noexcept
417       { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
418 
419       __int_type
420       operator&=(__int_type __i) volatile noexcept
421       { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
422 
423       __int_type
424       operator|=(__int_type __i) noexcept
425       { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
426 
427       __int_type
428       operator|=(__int_type __i) volatile noexcept
429       { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
430 
431       __int_type
432       operator^=(__int_type __i) noexcept
433       { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
434 
435       __int_type
436       operator^=(__int_type __i) volatile noexcept
437       { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
438 
439       bool
440       is_lock_free() const noexcept
441       {
442 	// Use a fake, minimally aligned pointer.
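	// (The address -_S_alignment has alignment exactly _S_alignment, so
	// the answer is conservative and does not depend on where _M_i
	// actually lives.)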
443 	return __atomic_is_lock_free(sizeof(_M_i),
444 	    reinterpret_cast<void *>(-_S_alignment));
445       }
446 
447       bool
448       is_lock_free() const volatile noexcept
449       {
450 	// Use a fake, minimally aligned pointer.
451 	return __atomic_is_lock_free(sizeof(_M_i),
452 	    reinterpret_cast<void *>(-_S_alignment));
453       }
454 
455       _GLIBCXX_ALWAYS_INLINE void
456       store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
457       {
458 	memory_order __b __attribute__ ((__unused__))
459 	  = __m & __memory_order_mask;
460 	__glibcxx_assert(__b != memory_order_acquire);
461 	__glibcxx_assert(__b != memory_order_acq_rel);
462 	__glibcxx_assert(__b != memory_order_consume);
463 
464 	__atomic_store_n(&_M_i, __i, int(__m));
465       }
466 
467       _GLIBCXX_ALWAYS_INLINE void
468       store(__int_type __i,
469 	    memory_order __m = memory_order_seq_cst) volatile noexcept
470       {
471 	memory_order __b __attribute__ ((__unused__))
472 	  = __m & __memory_order_mask;
473 	__glibcxx_assert(__b != memory_order_acquire);
474 	__glibcxx_assert(__b != memory_order_acq_rel);
475 	__glibcxx_assert(__b != memory_order_consume);
476 
477 	__atomic_store_n(&_M_i, __i, int(__m));
478       }
479 
480       _GLIBCXX_ALWAYS_INLINE __int_type
481       load(memory_order __m = memory_order_seq_cst) const noexcept
482       {
483 	memory_order __b __attribute__ ((__unused__))
484 	  = __m & __memory_order_mask;
485 	__glibcxx_assert(__b != memory_order_release);
486 	__glibcxx_assert(__b != memory_order_acq_rel);
487 
488 	return __atomic_load_n(&_M_i, int(__m));
489       }
490 
491       _GLIBCXX_ALWAYS_INLINE __int_type
492       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
493       {
494 	memory_order __b __attribute__ ((__unused__))
495 	  = __m & __memory_order_mask;
496 	__glibcxx_assert(__b != memory_order_release);
497 	__glibcxx_assert(__b != memory_order_acq_rel);
498 
499 	return __atomic_load_n(&_M_i, int(__m));
500       }
501 
502       _GLIBCXX_ALWAYS_INLINE __int_type
503       exchange(__int_type __i,
504 	       memory_order __m = memory_order_seq_cst) noexcept
505       {
506 	return __atomic_exchange_n(&_M_i, __i, int(__m));
507       }
508 
509 
510       _GLIBCXX_ALWAYS_INLINE __int_type
511       exchange(__int_type __i,
512 	       memory_order __m = memory_order_seq_cst) volatile noexcept
513       {
514 	return __atomic_exchange_n(&_M_i, __i, int(__m));
515       }
516 
517       _GLIBCXX_ALWAYS_INLINE bool
518       compare_exchange_weak(__int_type& __i1, __int_type __i2,
519 			    memory_order __m1, memory_order __m2) noexcept
520       {
521 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
522 
523 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
524 					   int(__m1), int(__m2));
525       }
526 
527       _GLIBCXX_ALWAYS_INLINE bool
528       compare_exchange_weak(__int_type& __i1, __int_type __i2,
529 			    memory_order __m1,
530 			    memory_order __m2) volatile noexcept
531       {
532 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
533 
534 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
535 					   int(__m1), int(__m2));
536       }
537 
538       _GLIBCXX_ALWAYS_INLINE bool
539       compare_exchange_weak(__int_type& __i1, __int_type __i2,
540 			    memory_order __m = memory_order_seq_cst) noexcept
541       {
542 	return compare_exchange_weak(__i1, __i2, __m,
543 				     __cmpexch_failure_order(__m));
544       }
545 
546       _GLIBCXX_ALWAYS_INLINE bool
547       compare_exchange_weak(__int_type& __i1, __int_type __i2,
548 		   memory_order __m = memory_order_seq_cst) volatile noexcept
549       {
550 	return compare_exchange_weak(__i1, __i2, __m,
551 				     __cmpexch_failure_order(__m));
552       }
553 
554       _GLIBCXX_ALWAYS_INLINE bool
555       compare_exchange_strong(__int_type& __i1, __int_type __i2,
556 			      memory_order __m1, memory_order __m2) noexcept
557       {
558 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
559 
560 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
561 					   int(__m1), int(__m2));
562       }
563 
564       _GLIBCXX_ALWAYS_INLINE bool
565       compare_exchange_strong(__int_type& __i1, __int_type __i2,
566 			      memory_order __m1,
567 			      memory_order __m2) volatile noexcept
568       {
569 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
570 
571 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
572 					   int(__m1), int(__m2));
573       }
574 
575       _GLIBCXX_ALWAYS_INLINE bool
576       compare_exchange_strong(__int_type& __i1, __int_type __i2,
577 			      memory_order __m = memory_order_seq_cst) noexcept
578       {
579 	return compare_exchange_strong(__i1, __i2, __m,
580 				       __cmpexch_failure_order(__m));
581       }
582 
583       _GLIBCXX_ALWAYS_INLINE bool
584       compare_exchange_strong(__int_type& __i1, __int_type __i2,
585 		 memory_order __m = memory_order_seq_cst) volatile noexcept
586       {
587 	return compare_exchange_strong(__i1, __i2, __m,
588 				       __cmpexch_failure_order(__m));
589       }
590 
591 #if __cpp_lib_atomic_wait
592       _GLIBCXX_ALWAYS_INLINE void
593       wait(__int_type __old,
594 	  memory_order __m = memory_order_seq_cst) const noexcept
595       {
596 	std::__atomic_wait_address_v(&_M_i, __old,
597 			   [__m, this] { return this->load(__m); });
598       }
599 
600       // TODO add const volatile overload
601 
602       _GLIBCXX_ALWAYS_INLINE void
603       notify_one() noexcept
604       { std::__atomic_notify_address(&_M_i, false); }
605 
606       // TODO add const volatile overload
607 
608       _GLIBCXX_ALWAYS_INLINE void
609       notify_all() noexcept
610       { std::__atomic_notify_address(&_M_i, true); }
611 
612       // TODO add const volatile overload
613 #endif // __cpp_lib_atomic_wait
614 
615       _GLIBCXX_ALWAYS_INLINE __int_type
616       fetch_add(__int_type __i,
617 		memory_order __m = memory_order_seq_cst) noexcept
618       { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
619 
620       _GLIBCXX_ALWAYS_INLINE __int_type
621       fetch_add(__int_type __i,
622 		memory_order __m = memory_order_seq_cst) volatile noexcept
623       { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
624 
625       _GLIBCXX_ALWAYS_INLINE __int_type
626       fetch_sub(__int_type __i,
627 		memory_order __m = memory_order_seq_cst) noexcept
628       { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
629 
630       _GLIBCXX_ALWAYS_INLINE __int_type
631       fetch_sub(__int_type __i,
632 		memory_order __m = memory_order_seq_cst) volatile noexcept
633       { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
634 
635       _GLIBCXX_ALWAYS_INLINE __int_type
636       fetch_and(__int_type __i,
637 		memory_order __m = memory_order_seq_cst) noexcept
638       { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
639 
640       _GLIBCXX_ALWAYS_INLINE __int_type
641       fetch_and(__int_type __i,
642 		memory_order __m = memory_order_seq_cst) volatile noexcept
643       { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
644 
645       _GLIBCXX_ALWAYS_INLINE __int_type
646       fetch_or(__int_type __i,
647 	       memory_order __m = memory_order_seq_cst) noexcept
648       { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
649 
650       _GLIBCXX_ALWAYS_INLINE __int_type
651       fetch_or(__int_type __i,
652 	       memory_order __m = memory_order_seq_cst) volatile noexcept
653       { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
654 
655       _GLIBCXX_ALWAYS_INLINE __int_type
656       fetch_xor(__int_type __i,
657 		memory_order __m = memory_order_seq_cst) noexcept
658       { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
659 
660       _GLIBCXX_ALWAYS_INLINE __int_type
661       fetch_xor(__int_type __i,
662 		memory_order __m = memory_order_seq_cst) volatile noexcept
663       { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
664     };
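
  // Illustrative sketch (not part of the upstream header): std::atomic<int>
  // and the other integral atomics are implemented in terms of the members
  // above.  A typical compare-exchange retry loop looks like this; the names
  // __counter and __add_if_below are hypothetical.
  //
  //   std::atomic<int> __counter{0};
  //
  //   bool __add_if_below(int __limit) noexcept
  //   {
  //     int __old = __counter.load(std::memory_order_relaxed);
  //     while (__old < __limit)
  //       if (__counter.compare_exchange_weak(__old, __old + 1,
  //                                           std::memory_order_acq_rel,
  //                                           std::memory_order_relaxed))
  //         return true;   // stored __old + 1
  //     return false;      // a failed CAS reloaded __old and it hit __limit
  //   }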
665 
666 
667   /// Partial specialization for pointer types.
668   template<typename _PTp>
669     struct __atomic_base<_PTp*>
670     {
671     private:
672       typedef _PTp* 	__pointer_type;
673 
674       __pointer_type 	_M_p _GLIBCXX20_INIT(nullptr);
675 
676       // Factored out to facilitate explicit specialization.
677       constexpr ptrdiff_t
678       _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
679 
680       constexpr ptrdiff_t
681       _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
682 
683     public:
684       __atomic_base() noexcept = default;
685       ~__atomic_base() noexcept = default;
686       __atomic_base(const __atomic_base&) = delete;
687       __atomic_base& operator=(const __atomic_base&) = delete;
688       __atomic_base& operator=(const __atomic_base&) volatile = delete;
689 
690       // Requires __pointer_type convertible to _M_p.
691       constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
692 
693       operator __pointer_type() const noexcept
694       { return load(); }
695 
696       operator __pointer_type() const volatile noexcept
697       { return load(); }
698 
699       __pointer_type
700       operator=(__pointer_type __p) noexcept
701       {
702 	store(__p);
703 	return __p;
704       }
705 
706       __pointer_type
707       operator=(__pointer_type __p) volatile noexcept
708       {
709 	store(__p);
710 	return __p;
711       }
712 
713       __pointer_type
714       operator++(int) noexcept
715       { return fetch_add(1); }
716 
717       __pointer_type
718       operator++(int) volatile noexcept
719       { return fetch_add(1); }
720 
721       __pointer_type
722       operator--(int) noexcept
723       { return fetch_sub(1); }
724 
725       __pointer_type
726       operator--(int) volatile noexcept
727       { return fetch_sub(1); }
728 
729       __pointer_type
730       operator++() noexcept
731       { return __atomic_add_fetch(&_M_p, _M_type_size(1),
732 				  int(memory_order_seq_cst)); }
733 
734       __pointer_type
735       operator++() volatile noexcept
736       { return __atomic_add_fetch(&_M_p, _M_type_size(1),
737 				  int(memory_order_seq_cst)); }
738 
739       __pointer_type
740       operator--() noexcept
741       { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
742 				  int(memory_order_seq_cst)); }
743 
744       __pointer_type
745       operator--() volatile noexcept
746       { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
747 				  int(memory_order_seq_cst)); }
748 
749       __pointer_type
750       operator+=(ptrdiff_t __d) noexcept
751       { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
752 				  int(memory_order_seq_cst)); }
753 
754       __pointer_type
755       operator+=(ptrdiff_t __d) volatile noexcept
756       { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
757 				  int(memory_order_seq_cst)); }
758 
759       __pointer_type
760       operator-=(ptrdiff_t __d) noexcept
761       { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
762 				  int(memory_order_seq_cst)); }
763 
764       __pointer_type
765       operator-=(ptrdiff_t __d) volatile noexcept
766       { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
767 				  int(memory_order_seq_cst)); }
768 
769       bool
770       is_lock_free() const noexcept
771       {
772 	// Produce a fake, minimally aligned pointer.
773 	return __atomic_is_lock_free(sizeof(_M_p),
774 	    reinterpret_cast<void *>(-__alignof(_M_p)));
775       }
776 
777       bool
778       is_lock_free() const volatile noexcept
779       {
780 	// Produce a fake, minimally aligned pointer.
781 	return __atomic_is_lock_free(sizeof(_M_p),
782 	    reinterpret_cast<void *>(-__alignof(_M_p)));
783       }
784 
785       _GLIBCXX_ALWAYS_INLINE void
786       store(__pointer_type __p,
787 	    memory_order __m = memory_order_seq_cst) noexcept
788       {
789 	memory_order __b __attribute__ ((__unused__))
790 	  = __m & __memory_order_mask;
791 
792 	__glibcxx_assert(__b != memory_order_acquire);
793 	__glibcxx_assert(__b != memory_order_acq_rel);
794 	__glibcxx_assert(__b != memory_order_consume);
795 
796 	__atomic_store_n(&_M_p, __p, int(__m));
797       }
798 
799       _GLIBCXX_ALWAYS_INLINE void
800       store(__pointer_type __p,
801 	    memory_order __m = memory_order_seq_cst) volatile noexcept
802       {
803 	memory_order __b __attribute__ ((__unused__))
804 	  = __m & __memory_order_mask;
805 	__glibcxx_assert(__b != memory_order_acquire);
806 	__glibcxx_assert(__b != memory_order_acq_rel);
807 	__glibcxx_assert(__b != memory_order_consume);
808 
809 	__atomic_store_n(&_M_p, __p, int(__m));
810       }
811 
812       _GLIBCXX_ALWAYS_INLINE __pointer_type
813       load(memory_order __m = memory_order_seq_cst) const noexcept
814       {
815 	memory_order __b __attribute__ ((__unused__))
816 	  = __m & __memory_order_mask;
817 	__glibcxx_assert(__b != memory_order_release);
818 	__glibcxx_assert(__b != memory_order_acq_rel);
819 
820 	return __atomic_load_n(&_M_p, int(__m));
821       }
822 
823       _GLIBCXX_ALWAYS_INLINE __pointer_type
824       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
825       {
826 	memory_order __b __attribute__ ((__unused__))
827 	  = __m & __memory_order_mask;
828 	__glibcxx_assert(__b != memory_order_release);
829 	__glibcxx_assert(__b != memory_order_acq_rel);
830 
831 	return __atomic_load_n(&_M_p, int(__m));
832       }
833 
834       _GLIBCXX_ALWAYS_INLINE __pointer_type
835       exchange(__pointer_type __p,
836 	       memory_order __m = memory_order_seq_cst) noexcept
837       {
838 	return __atomic_exchange_n(&_M_p, __p, int(__m));
839       }
840 
841 
842       _GLIBCXX_ALWAYS_INLINE __pointer_type
843       exchange(__pointer_type __p,
844 	       memory_order __m = memory_order_seq_cst) volatile noexcept
845       {
846 	return __atomic_exchange_n(&_M_p, __p, int(__m));
847       }
848 
849       _GLIBCXX_ALWAYS_INLINE bool
850       compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
851 			    memory_order __m1,
852 			    memory_order __m2) noexcept
853       {
854 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
855 
856 	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
857 					   int(__m1), int(__m2));
858       }
859 
860       _GLIBCXX_ALWAYS_INLINE bool
861       compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
862 			    memory_order __m1,
863 			    memory_order __m2) volatile noexcept
864       {
865 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
866 
867 	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
868 					   int(__m1), int(__m2));
869       }
870 
871       _GLIBCXX_ALWAYS_INLINE bool
872       compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
873 			      memory_order __m1,
874 			      memory_order __m2) noexcept
875       {
876 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
877 
878 	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
879 					   int(__m1), int(__m2));
880       }
881 
882       _GLIBCXX_ALWAYS_INLINE bool
883       compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
884 			      memory_order __m1,
885 			      memory_order __m2) volatile noexcept
886       {
887 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
888 
889 	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
890 					   int(__m1), int(__m2));
891       }
892 
893 #if __cpp_lib_atomic_wait
894       _GLIBCXX_ALWAYS_INLINE void
895       wait(__pointer_type __old,
896 	   memory_order __m = memory_order_seq_cst) const noexcept
897       {
898 	std::__atomic_wait_address_v(&_M_p, __old,
899 				     [__m, this]
900 				     { return this->load(__m); });
901       }
902 
903       // TODO add const volatile overload
904 
905       _GLIBCXX_ALWAYS_INLINE void
906       notify_one() const noexcept
907       { std::__atomic_notify_address(&_M_p, false); }
908 
909       // TODO add const volatile overload
910 
911       _GLIBCXX_ALWAYS_INLINE void
912       notify_all() const noexcept
913       { std::__atomic_notify_address(&_M_p, true); }
914 
915       // TODO add const volatile overload
916 #endif // __cpp_lib_atomic_wait
917 
918       _GLIBCXX_ALWAYS_INLINE __pointer_type
919       fetch_add(ptrdiff_t __d,
920 		memory_order __m = memory_order_seq_cst) noexcept
921       { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
922 
923       _GLIBCXX_ALWAYS_INLINE __pointer_type
924       fetch_add(ptrdiff_t __d,
925 		memory_order __m = memory_order_seq_cst) volatile noexcept
926       { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
927 
928       _GLIBCXX_ALWAYS_INLINE __pointer_type
929       fetch_sub(ptrdiff_t __d,
930 		memory_order __m = memory_order_seq_cst) noexcept
931       { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
932 
933       _GLIBCXX_ALWAYS_INLINE __pointer_type
934       fetch_sub(ptrdiff_t __d,
935 		memory_order __m = memory_order_seq_cst) volatile noexcept
936       { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
937     };
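
  // Illustrative sketch (not part of the upstream header): _M_type_size above
  // scales the ptrdiff_t argument by sizeof(_PTp), so the pointer operations
  // advance element-wise, matching built-in pointer arithmetic.  The names
  // __buf and __p are hypothetical.
  //
  //   long __buf[4] = {};
  //   std::atomic<long*> __p{__buf};
  //
  //   long* __prev = __p.fetch_add(2);  // __prev == __buf, __p holds __buf + 2
  //   __p -= 1;                         // __p now holds __buf + 1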
938 
939 #if __cplusplus > 201703L
940   // Implementation details of atomic_ref and atomic<floating-point>.
941   namespace __atomic_impl
942   {
943     // Remove volatile and create a non-deduced context for value arguments.
944     template<typename _Tp>
945       using _Val = remove_volatile_t<_Tp>;
946 
947     // As above, but for difference_type arguments.
948     template<typename _Tp>
949       using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
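
    // Illustrative note (not part of the upstream header): because _Val<_Tp>
    // expands to remove_volatile_t<_Tp>, a value parameter written as
    // _Val<_Tp> is a non-deduced context; _Tp is deduced from the pointer
    // argument alone, so the volatile overloads can pass a `volatile _Tp*'
    // together with a plain, non-volatile value.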
950 
951     template<size_t _Size, size_t _Align>
952       _GLIBCXX_ALWAYS_INLINE bool
953       is_lock_free() noexcept
954       {
955 	// Produce a fake, minimally aligned pointer.
956 	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
957       }
958 
959     template<typename _Tp>
960       _GLIBCXX_ALWAYS_INLINE void
961       store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
962       { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
963 
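    // load (and exchange below) return the value through a raw, suitably
    // aligned buffer so that _Val<_Tp> is never required to be
    // default-constructible; the built-in writes the bytes directly into
    // __buf.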
964     template<typename _Tp>
965       _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
966       load(const _Tp* __ptr, memory_order __m) noexcept
967       {
968 	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
969 	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
970 	__atomic_load(__ptr, __dest, int(__m));
971 	return *__dest;
972       }
973 
974     template<typename _Tp>
975       _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
976       exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
977       {
978         alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
979 	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
980 	__atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
981 	return *__dest;
982       }
983 
984     template<typename _Tp>
985       _GLIBCXX_ALWAYS_INLINE bool
986       compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
987 			    _Val<_Tp> __desired, memory_order __success,
988 			    memory_order __failure) noexcept
989       {
990 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));
991 
992 	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
993 					 std::__addressof(__desired), true,
994 					 int(__success), int(__failure));
995       }
996 
997     template<typename _Tp>
998       _GLIBCXX_ALWAYS_INLINE bool
999       compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1000 			      _Val<_Tp> __desired, memory_order __success,
1001 			      memory_order __failure) noexcept
1002       {
1003 	__glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));
1004 
1005 	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
1006 					 std::__addressof(__desired), false,
1007 					 int(__success), int(__failure));
1008       }
1009 
1010 #if __cpp_lib_atomic_wait
1011     template<typename _Tp>
1012       _GLIBCXX_ALWAYS_INLINE void
1013       wait(const _Tp* __ptr, _Val<_Tp> __old,
1014 	   memory_order __m = memory_order_seq_cst) noexcept
1015       {
1016 	std::__atomic_wait_address_v(__ptr, __old,
1017 	    [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1018       }
1019 
1020       // TODO add const volatile overload
1021 
1022     template<typename _Tp>
1023       _GLIBCXX_ALWAYS_INLINE void
1024       notify_one(const _Tp* __ptr) noexcept
1025       { std::__atomic_notify_address(__ptr, false); }
1026 
1027       // TODO add const volatile overload
1028 
1029     template<typename _Tp>
1030       _GLIBCXX_ALWAYS_INLINE void
1031       notify_all(const _Tp* __ptr) noexcept
1032       { std::__atomic_notify_address(__ptr, true); }
1033 
1034       // TODO add const volatile overload
1035 #endif // __cpp_lib_atomic_wait
1036 
1037     template<typename _Tp>
1038       _GLIBCXX_ALWAYS_INLINE _Tp
1039       fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1040       { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1041 
1042     template<typename _Tp>
1043       _GLIBCXX_ALWAYS_INLINE _Tp
1044       fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1045       { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1046 
1047     template<typename _Tp>
1048       _GLIBCXX_ALWAYS_INLINE _Tp
1049       fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1050       { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1051 
1052     template<typename _Tp>
1053       _GLIBCXX_ALWAYS_INLINE _Tp
1054       fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1055       { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1056 
1057     template<typename _Tp>
1058       _GLIBCXX_ALWAYS_INLINE _Tp
1059       fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1060       { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1061 
1062     template<typename _Tp>
1063       _GLIBCXX_ALWAYS_INLINE _Tp
1064       __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1065       { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1066 
1067     template<typename _Tp>
1068       _GLIBCXX_ALWAYS_INLINE _Tp
1069       __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1070       { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1071 
1072     template<typename _Tp>
1073       _GLIBCXX_ALWAYS_INLINE _Tp
1074       __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1075       { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1076 
1077     template<typename _Tp>
1078       _GLIBCXX_ALWAYS_INLINE _Tp
1079       __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1080       { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1081 
1082     template<typename _Tp>
1083       _GLIBCXX_ALWAYS_INLINE _Tp
1084       __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1085       { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1086 
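    // There is no __atomic_fetch_add/sub built-in for floating-point types,
    // so the _flt helpers below emulate them with a compare-exchange retry
    // loop: on failure the CAS reloads __oldval with the current value, the
    // sum or difference is recomputed, and the exchange is retried.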
1087     template<typename _Tp>
1088       _Tp
1089       __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1090       {
1091 	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1092 	_Val<_Tp> __newval = __oldval + __i;
1093 	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1094 				      memory_order_relaxed))
1095 	  __newval = __oldval + __i;
1096 	return __oldval;
1097       }
1098 
1099     template<typename _Tp>
1100       _Tp
1101       __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1102       {
1103 	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1104 	_Val<_Tp> __newval = __oldval - __i;
1105 	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1106 				      memory_order_relaxed))
1107 	  __newval = __oldval - __i;
1108 	return __oldval;
1109       }
1110 
1111     template<typename _Tp>
1112       _Tp
1113       __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1114       {
1115 	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1116 	_Val<_Tp> __newval = __oldval + __i;
1117 	while (!compare_exchange_weak(__ptr, __oldval, __newval,
1118 				      memory_order_seq_cst,
1119 				      memory_order_relaxed))
1120 	  __newval = __oldval + __i;
1121 	return __newval;
1122       }
1123 
1124     template<typename _Tp>
1125       _Tp
1126       __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1127       {
1128 	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1129 	_Val<_Tp> __newval = __oldval - __i;
1130 	while (!compare_exchange_weak(__ptr, __oldval, __newval,
1131 				      memory_order_seq_cst,
1132 				      memory_order_relaxed))
1133 	  __newval = __oldval - __i;
1134 	return __newval;
1135       }
1136   } // namespace __atomic_impl
1137 
1138   // base class for atomic<floating-point-type>
1139   template<typename _Fp>
1140     struct __atomic_float
1141     {
1142       static_assert(is_floating_point_v<_Fp>);
1143 
1144       static constexpr size_t _S_alignment = __alignof__(_Fp);
1145 
1146     public:
1147       using value_type = _Fp;
1148       using difference_type = value_type;
1149 
1150       static constexpr bool is_always_lock_free
1151 	= __atomic_always_lock_free(sizeof(_Fp), 0);
1152 
1153       __atomic_float() = default;
1154 
1155       constexpr
1156       __atomic_float(_Fp __t) : _M_fp(__t)
1157       { }
1158 
1159       __atomic_float(const __atomic_float&) = delete;
1160       __atomic_float& operator=(const __atomic_float&) = delete;
1161       __atomic_float& operator=(const __atomic_float&) volatile = delete;
1162 
1163       _Fp
1164       operator=(_Fp __t) volatile noexcept
1165       {
1166 	this->store(__t);
1167 	return __t;
1168       }
1169 
1170       _Fp
1171       operator=(_Fp __t) noexcept
1172       {
1173 	this->store(__t);
1174 	return __t;
1175       }
1176 
1177       bool
1178       is_lock_free() const volatile noexcept
1179       { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1180 
1181       bool
1182       is_lock_free() const noexcept
1183       { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1184 
1185       void
1186       store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1187       { __atomic_impl::store(&_M_fp, __t, __m); }
1188 
1189       void
1190       store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1191       { __atomic_impl::store(&_M_fp, __t, __m); }
1192 
1193       _Fp
1194       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1195       { return __atomic_impl::load(&_M_fp, __m); }
1196 
1197       _Fp
1198       load(memory_order __m = memory_order_seq_cst) const noexcept
1199       { return __atomic_impl::load(&_M_fp, __m); }
1200 
1201       operator _Fp() const volatile noexcept { return this->load(); }
1202       operator _Fp() const noexcept { return this->load(); }
1203 
1204       _Fp
1205       exchange(_Fp __desired,
1206 	       memory_order __m = memory_order_seq_cst) volatile noexcept
1207       { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1208 
1209       _Fp
1210       exchange(_Fp __desired,
1211 	       memory_order __m = memory_order_seq_cst) noexcept
1212       { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1213 
1214       bool
1215       compare_exchange_weak(_Fp& __expected, _Fp __desired,
1216 			    memory_order __success,
1217 			    memory_order __failure) noexcept
1218       {
1219 	return __atomic_impl::compare_exchange_weak(&_M_fp,
1220 						    __expected, __desired,
1221 						    __success, __failure);
1222       }
1223 
1224       bool
1225       compare_exchange_weak(_Fp& __expected, _Fp __desired,
1226 			    memory_order __success,
1227 			    memory_order __failure) volatile noexcept
1228       {
1229 	return __atomic_impl::compare_exchange_weak(&_M_fp,
1230 						    __expected, __desired,
1231 						    __success, __failure);
1232       }
1233 
1234       bool
1235       compare_exchange_strong(_Fp& __expected, _Fp __desired,
1236 			      memory_order __success,
1237 			      memory_order __failure) noexcept
1238       {
1239 	return __atomic_impl::compare_exchange_strong(&_M_fp,
1240 						      __expected, __desired,
1241 						      __success, __failure);
1242       }
1243 
1244       bool
1245       compare_exchange_strong(_Fp& __expected, _Fp __desired,
1246 			      memory_order __success,
1247 			      memory_order __failure) volatile noexcept
1248       {
1249 	return __atomic_impl::compare_exchange_strong(&_M_fp,
1250 						      __expected, __desired,
1251 						      __success, __failure);
1252       }
1253 
1254       bool
1255       compare_exchange_weak(_Fp& __expected, _Fp __desired,
1256 			    memory_order __order = memory_order_seq_cst)
1257       noexcept
1258       {
1259 	return compare_exchange_weak(__expected, __desired, __order,
1260                                      __cmpexch_failure_order(__order));
1261       }
1262 
1263       bool
1264       compare_exchange_weak(_Fp& __expected, _Fp __desired,
1265 			    memory_order __order = memory_order_seq_cst)
1266       volatile noexcept
1267       {
1268 	return compare_exchange_weak(__expected, __desired, __order,
1269                                      __cmpexch_failure_order(__order));
1270       }
1271 
1272       bool
1273       compare_exchange_strong(_Fp& __expected, _Fp __desired,
1274 			      memory_order __order = memory_order_seq_cst)
1275       noexcept
1276       {
1277 	return compare_exchange_strong(__expected, __desired, __order,
1278 				       __cmpexch_failure_order(__order));
1279       }
1280 
1281       bool
1282       compare_exchange_strong(_Fp& __expected, _Fp __desired,
1283 			      memory_order __order = memory_order_seq_cst)
1284       volatile noexcept
1285       {
1286 	return compare_exchange_strong(__expected, __desired, __order,
1287 				       __cmpexch_failure_order(__order));
1288       }
1289 
1290 #if __cpp_lib_atomic_wait
1291       _GLIBCXX_ALWAYS_INLINE void
1292       wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1293       { __atomic_impl::wait(&_M_fp, __old, __m); }
1294 
1295       // TODO add const volatile overload
1296 
1297       _GLIBCXX_ALWAYS_INLINE void
1298       notify_one() const noexcept
1299       { __atomic_impl::notify_one(&_M_fp); }
1300 
1301       // TODO add const volatile overload
1302 
1303       _GLIBCXX_ALWAYS_INLINE void
1304       notify_all() const noexcept
1305       { __atomic_impl::notify_all(&_M_fp); }
1306 
1307       // TODO add const volatile overload
1308 #endif // __cpp_lib_atomic_wait
1309 
1310       value_type
1311       fetch_add(value_type __i,
1312 		memory_order __m = memory_order_seq_cst) noexcept
1313       { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1314 
1315       value_type
1316       fetch_add(value_type __i,
1317 		memory_order __m = memory_order_seq_cst) volatile noexcept
1318       { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1319 
1320       value_type
1321       fetch_sub(value_type __i,
1322 		memory_order __m = memory_order_seq_cst) noexcept
1323       { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1324 
1325       value_type
1326       fetch_sub(value_type __i,
1327 		memory_order __m = memory_order_seq_cst) volatile noexcept
1328       { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1329 
1330       value_type
1331       operator+=(value_type __i) noexcept
1332       { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1333 
1334       value_type
1335       operator+=(value_type __i) volatile noexcept
1336       { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1337 
1338       value_type
1339       operator-=(value_type __i) noexcept
1340       { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1341 
1342       value_type
1343       operator-=(value_type __i) volatile noexcept
1344       { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1345 
1346     private:
1347       alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1348     };
1349 #undef _GLIBCXX20_INIT
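
  // Illustrative sketch (not part of the upstream header): in this
  // implementation std::atomic<float>, std::atomic<double> and
  // std::atomic<long double> build on __atomic_float, so C++20 code can do
  // atomic floating-point arithmetic directly.  The name __accum is
  // hypothetical.
  //
  //   std::atomic<double> __accum{0.0};
  //   __accum.fetch_add(1.5);        // returns the old value, 0.0
  //   __accum += 2.5;                // returns the new value, 4.0
  //   double __snap = __accum.load(std::memory_order_acquire);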
1350 
1351   template<typename _Tp,
1352 	   bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
1353     struct __atomic_ref;
1354 
1355   // base class for non-integral, non-floating-point, non-pointer types
1356   template<typename _Tp>
1357     struct __atomic_ref<_Tp, false, false>
1358     {
1359       static_assert(is_trivially_copyable_v<_Tp>);
1360 
1361       // 1/2/4/8/16-byte types must be aligned to at least their size.
1362       static constexpr int _S_min_alignment
1363 	= (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1364 	? 0 : sizeof(_Tp);
1365 
1366     public:
1367       using value_type = _Tp;
1368 
1369       static constexpr bool is_always_lock_free
1370 	= __atomic_always_lock_free(sizeof(_Tp), 0);
1371 
1372       static constexpr size_t required_alignment
1373 	= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1374 
1375       __atomic_ref& operator=(const __atomic_ref&) = delete;
1376 
1377       explicit
1378       __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1379       { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1380 
1381       __atomic_ref(const __atomic_ref&) noexcept = default;
1382 
1383       _Tp
1384       operator=(_Tp __t) const noexcept
1385       {
1386 	this->store(__t);
1387 	return __t;
1388       }
1389 
1390       operator _Tp() const noexcept { return this->load(); }
1391 
1392       bool
1393       is_lock_free() const noexcept
1394       { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1395 
1396       void
1397       store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1398       { __atomic_impl::store(_M_ptr, __t, __m); }
1399 
1400       _Tp
1401       load(memory_order __m = memory_order_seq_cst) const noexcept
1402       { return __atomic_impl::load(_M_ptr, __m); }
1403 
1404       _Tp
1405       exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1406       const noexcept
1407       { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1408 
1409       bool
1410       compare_exchange_weak(_Tp& __expected, _Tp __desired,
1411 			    memory_order __success,
1412 			    memory_order __failure) const noexcept
1413       {
1414 	return __atomic_impl::compare_exchange_weak(_M_ptr,
1415 						    __expected, __desired,
1416 						    __success, __failure);
1417       }
1418 
1419       bool
1420       compare_exchange_strong(_Tp& __expected, _Tp __desired,
1421 			    memory_order __success,
1422 			    memory_order __failure) const noexcept
1423       {
1424 	return __atomic_impl::compare_exchange_strong(_M_ptr,
1425 						      __expected, __desired,
1426 						      __success, __failure);
1427       }
1428 
1429       bool
1430       compare_exchange_weak(_Tp& __expected, _Tp __desired,
1431 			    memory_order __order = memory_order_seq_cst)
1432       const noexcept
1433       {
1434 	return compare_exchange_weak(__expected, __desired, __order,
1435                                      __cmpexch_failure_order(__order));
1436       }
1437 
1438       bool
1439       compare_exchange_strong(_Tp& __expected, _Tp __desired,
1440 			      memory_order __order = memory_order_seq_cst)
1441       const noexcept
1442       {
1443 	return compare_exchange_strong(__expected, __desired, __order,
1444 				       __cmpexch_failure_order(__order));
1445       }
1446 
1447 #if __cpp_lib_atomic_wait
1448       _GLIBCXX_ALWAYS_INLINE void
1449       wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1450       { __atomic_impl::wait(_M_ptr, __old, __m); }
1451 
1452       // TODO add const volatile overload
1453 
1454       _GLIBCXX_ALWAYS_INLINE void
1455       notify_one() const noexcept
1456       { __atomic_impl::notify_one(_M_ptr); }
1457 
1458       // TODO add const volatile overload
1459 
1460       _GLIBCXX_ALWAYS_INLINE void
1461       notify_all() const noexcept
1462       { __atomic_impl::notify_all(_M_ptr); }
1463 
1464       // TODO add const volatile overload
1465 #endif // __cpp_lib_atomic_wait
1466 
1467     private:
1468       _Tp* _M_ptr;
1469     };
1470 
1471   // base class for atomic_ref<integral-type>
1472   template<typename _Tp>
1473     struct __atomic_ref<_Tp, true, false>
1474     {
1475       static_assert(is_integral_v<_Tp>);
1476 
1477     public:
1478       using value_type = _Tp;
1479       using difference_type = value_type;
1480 
1481       static constexpr bool is_always_lock_free
1482 	= __atomic_always_lock_free(sizeof(_Tp), 0);
1483 
1484       static constexpr size_t required_alignment
1485 	= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1486 
1487       __atomic_ref() = delete;
1488       __atomic_ref& operator=(const __atomic_ref&) = delete;
1489 
1490       explicit
1491       __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1492       { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1493 
1494       __atomic_ref(const __atomic_ref&) noexcept = default;
1495 
1496       _Tp
1497       operator=(_Tp __t) const noexcept
1498       {
1499 	this->store(__t);
1500 	return __t;
1501       }
1502 
1503       operator _Tp() const noexcept { return this->load(); }
1504 
1505       bool
1506       is_lock_free() const noexcept
1507       {
1508 	return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1509       }
1510 
1511       void
1512       store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1513       { __atomic_impl::store(_M_ptr, __t, __m); }
1514 
1515       _Tp
1516       load(memory_order __m = memory_order_seq_cst) const noexcept
1517       { return __atomic_impl::load(_M_ptr, __m); }
1518 
1519       _Tp
1520       exchange(_Tp __desired,
1521 	       memory_order __m = memory_order_seq_cst) const noexcept
1522       { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1523 
1524       bool
1525       compare_exchange_weak(_Tp& __expected, _Tp __desired,
1526 			    memory_order __success,
1527 			    memory_order __failure) const noexcept
1528       {
1529 	return __atomic_impl::compare_exchange_weak(_M_ptr,
1530 						    __expected, __desired,
1531 						    __success, __failure);
1532       }
1533 
1534       bool
1535       compare_exchange_strong(_Tp& __expected, _Tp __desired,
1536 			      memory_order __success,
1537 			      memory_order __failure) const noexcept
1538       {
1539 	return __atomic_impl::compare_exchange_strong(_M_ptr,
1540 						      __expected, __desired,
1541 						      __success, __failure);
1542       }
1543 
1544       bool
1545       compare_exchange_weak(_Tp& __expected, _Tp __desired,
1546 			    memory_order __order = memory_order_seq_cst)
1547       const noexcept
1548       {
1549 	return compare_exchange_weak(__expected, __desired, __order,
1550                                      __cmpexch_failure_order(__order));
1551       }
1552 
1553       bool
1554       compare_exchange_strong(_Tp& __expected, _Tp __desired,
1555 			      memory_order __order = memory_order_seq_cst)
1556       const noexcept
1557       {
1558 	return compare_exchange_strong(__expected, __desired, __order,
1559 				       __cmpexch_failure_order(__order));
1560       }
1561 
1562 #if __cpp_lib_atomic_wait
1563       _GLIBCXX_ALWAYS_INLINE void
1564       wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1565       { __atomic_impl::wait(_M_ptr, __old, __m); }
1566 
1567       // TODO add const volatile overload
1568 
1569       _GLIBCXX_ALWAYS_INLINE void
1570       notify_one() const noexcept
1571       { __atomic_impl::notify_one(_M_ptr); }
1572 
1573       // TODO add const volatile overload
1574 
1575       _GLIBCXX_ALWAYS_INLINE void
1576       notify_all() const noexcept
1577       { __atomic_impl::notify_all(_M_ptr); }
1578 
1579       // TODO add const volatile overload
1580 #endif // __cpp_lib_atomic_wait
1581 
1582       value_type
1583       fetch_add(value_type __i,
1584 		memory_order __m = memory_order_seq_cst) const noexcept
1585       { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1586 
1587       value_type
1588       fetch_sub(value_type __i,
1589 		memory_order __m = memory_order_seq_cst) const noexcept
1590       { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1591 
1592       value_type
1593       fetch_and(value_type __i,
1594 		memory_order __m = memory_order_seq_cst) const noexcept
1595       { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1596 
1597       value_type
1598       fetch_or(value_type __i,
1599 	       memory_order __m = memory_order_seq_cst) const noexcept
1600       { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1601 
1602       value_type
1603       fetch_xor(value_type __i,
1604 		memory_order __m = memory_order_seq_cst) const noexcept
1605       { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1606 
1607       _GLIBCXX_ALWAYS_INLINE value_type
1608       operator++(int) const noexcept
1609       { return fetch_add(1); }
1610 
1611       _GLIBCXX_ALWAYS_INLINE value_type
1612       operator--(int) const noexcept
1613       { return fetch_sub(1); }
1614 
1615       value_type
1616       operator++() const noexcept
1617       { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1618 
1619       value_type
1620       operator--() const noexcept
1621       { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1622 
1623       value_type
1624       operator+=(value_type __i) const noexcept
1625       { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1626 
1627       value_type
1628       operator-=(value_type __i) const noexcept
1629       { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1630 
1631       value_type
1632       operator&=(value_type __i) const noexcept
1633       { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1634 
1635       value_type
1636       operator|=(value_type __i) const noexcept
1637       { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1638 
1639       value_type
1640       operator^=(value_type __i) const noexcept
1641       { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1642 
1643     private:
1644       _Tp* _M_ptr;
1645     };
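
  // Illustrative use of the integral specialization through the public
  // std::atomic_ref wrapper defined in <atomic> (a sketch, not part of
  // this header):
  //
  //   int __counter = 0;
  //   std::atomic_ref<int> __ref(__counter);
  //   __ref.fetch_add(1, std::memory_order_relaxed);
  //   int __expected = 1;
  //   __ref.compare_exchange_strong(__expected, 2);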
1646 
1647   // base class for atomic_ref<floating-point-type>
1648   template<typename _Fp>
1649     struct __atomic_ref<_Fp, false, true>
1650     {
1651       static_assert(is_floating_point_v<_Fp>);
1652 
1653     public:
1654       using value_type = _Fp;
1655       using difference_type = value_type;
1656 
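      // Passing 0 as the second argument of __atomic_always_lock_free asks
      // the compiler to assume the typical alignment for objects of this
      // size.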
1657       static constexpr bool is_always_lock_free
1658 	= __atomic_always_lock_free(sizeof(_Fp), 0);
1659 
1660       static constexpr size_t required_alignment = __alignof__(_Fp);
1661 
1662       __atomic_ref() = delete;
1663       __atomic_ref& operator=(const __atomic_ref&) = delete;
1664 
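      // The referenced object must satisfy required_alignment; the
      // constructor checks this only when libstdc++ assertions
      // (_GLIBCXX_ASSERTIONS) are enabled.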
1665       explicit
1666       __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1667       { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1668 
1669       __atomic_ref(const __atomic_ref&) noexcept = default;
1670 
1671       _Fp
1672       operator=(_Fp __t) const noexcept
1673       {
1674 	this->store(__t);
1675 	return __t;
1676       }
1677 
1678       operator _Fp() const noexcept { return this->load(); }
1679 
1680       bool
1681       is_lock_free() const noexcept
1682       {
1683 	return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1684       }
1685 
1686       void
1687       store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1688       { __atomic_impl::store(_M_ptr, __t, __m); }
1689 
1690       _Fp
1691       load(memory_order __m = memory_order_seq_cst) const noexcept
1692       { return __atomic_impl::load(_M_ptr, __m); }
1693 
1694       _Fp
1695       exchange(_Fp __desired,
1696 	       memory_order __m = memory_order_seq_cst) const noexcept
1697       { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1698 
1699       bool
1700       compare_exchange_weak(_Fp& __expected, _Fp __desired,
1701 			    memory_order __success,
1702 			    memory_order __failure) const noexcept
1703       {
1704 	return __atomic_impl::compare_exchange_weak(_M_ptr,
1705 						    __expected, __desired,
1706 						    __success, __failure);
1707       }
1708 
1709       bool
1710       compare_exchange_strong(_Fp& __expected, _Fp __desired,
1711 			      memory_order __success,
1712 			      memory_order __failure) const noexcept
1713       {
1714 	return __atomic_impl::compare_exchange_strong(_M_ptr,
1715 						      __expected, __desired,
1716 						      __success, __failure);
1717       }
1718 
1719       bool
1720       compare_exchange_weak(_Fp& __expected, _Fp __desired,
1721 			    memory_order __order = memory_order_seq_cst)
1722       const noexcept
1723       {
1724 	return compare_exchange_weak(__expected, __desired, __order,
1725                                      __cmpexch_failure_order(__order));
1726       }
1727 
1728       bool
1729       compare_exchange_strong(_Fp& __expected, _Fp __desired,
1730 			      memory_order __order = memory_order_seq_cst)
1731       const noexcept
1732       {
1733 	return compare_exchange_strong(__expected, __desired, __order,
1734 				       __cmpexch_failure_order(__order));
1735       }
1736 
1737 #if __cpp_lib_atomic_wait
1738       _GLIBCXX_ALWAYS_INLINE void
1739       wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1740       { __atomic_impl::wait(_M_ptr, __old, __m); }
1741 
1742       // TODO add const volatile overload
1743 
1744       _GLIBCXX_ALWAYS_INLINE void
1745       notify_one() const noexcept
1746       { __atomic_impl::notify_one(_M_ptr); }
1747 
1748       // TODO add const volatile overload
1749 
1750       _GLIBCXX_ALWAYS_INLINE void
1751       notify_all() const noexcept
1752       { __atomic_impl::notify_all(_M_ptr); }
1753 
1754       // TODO add const volatile overload
1755 #endif // __cpp_lib_atomic_wait
1756 
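      // Among the arithmetic operations, C++20 atomic floating-point types
      // provide only fetch_add and fetch_sub (and the corresponding += and
      // -=); there are no bitwise or increment/decrement operators in this
      // specialization.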
1757       value_type
1758       fetch_add(value_type __i,
1759 		memory_order __m = memory_order_seq_cst) const noexcept
1760       { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1761 
1762       value_type
1763       fetch_sub(value_type __i,
1764 		memory_order __m = memory_order_seq_cst) const noexcept
1765       { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1766 
1767       value_type
1768       operator+=(value_type __i) const noexcept
1769       { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1770 
1771       value_type
1772       operator-=(value_type __i) const noexcept
1773       { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1774 
1775     private:
1776       _Fp* _M_ptr;
1777     };
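
  // Illustrative use of the floating-point specialization through the
  // public std::atomic_ref wrapper (a sketch, not part of this header):
  //
  //   double __total = 0.0;
  //   std::atomic_ref<double> __ref(__total);
  //   __ref.fetch_add(1.5);  // typically a compare-exchange loop, as most
  //                          // targets lack a native atomic FP add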
1778 
1779   // base class for atomic_ref<pointer-type>
1780   template<typename _Tp>
1781     struct __atomic_ref<_Tp*, false, false>
1782     {
1783     public:
1784       using value_type = _Tp*;
1785       using difference_type = ptrdiff_t;
1786 
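      // ATOMIC_POINTER_LOCK_FREE is 2 if pointer-sized atomics are always
      // lock-free on the target, 1 if they are sometimes lock-free, and 0
      // if they never are.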
1787       static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1788 
1789       static constexpr size_t required_alignment = __alignof__(_Tp*);
1790 
1791       __atomic_ref() = delete;
1792       __atomic_ref& operator=(const __atomic_ref&) = delete;
1793 
1794       explicit
1795       __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1796       { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1797 
1798       __atomic_ref(const __atomic_ref&) noexcept = default;
1799 
1800       _Tp*
1801       operator=(_Tp* __t) const noexcept
1802       {
1803 	this->store(__t);
1804 	return __t;
1805       }
1806 
1807       operator _Tp*() const noexcept { return this->load(); }
1808 
1809       bool
1810       is_lock_free() const noexcept
1811       {
1812 	return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1813       }
1814 
1815       void
1816       store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1817       { __atomic_impl::store(_M_ptr, __t, __m); }
1818 
1819       _Tp*
1820       load(memory_order __m = memory_order_seq_cst) const noexcept
1821       { return __atomic_impl::load(_M_ptr, __m); }
1822 
1823       _Tp*
1824       exchange(_Tp* __desired,
1825 	       memory_order __m = memory_order_seq_cst) const noexcept
1826       { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1827 
1828       bool
1829       compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1830 			    memory_order __success,
1831 			    memory_order __failure) const noexcept
1832       {
1833 	return __atomic_impl::compare_exchange_weak(_M_ptr,
1834 						    __expected, __desired,
1835 						    __success, __failure);
1836       }
1837 
1838       bool
1839       compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1840 			      memory_order __success,
1841 			      memory_order __failure) const noexcept
1842       {
1843 	return __atomic_impl::compare_exchange_strong(_M_ptr,
1844 						      __expected, __desired,
1845 						      __success, __failure);
1846       }
1847 
1848       bool
1849       compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1850 			    memory_order __order = memory_order_seq_cst)
1851       const noexcept
1852       {
1853 	return compare_exchange_weak(__expected, __desired, __order,
1854                                      __cmpexch_failure_order(__order));
1855       }
1856 
1857       bool
1858       compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1859 			      memory_order __order = memory_order_seq_cst)
1860       const noexcept
1861       {
1862 	return compare_exchange_strong(__expected, __desired, __order,
1863 				       __cmpexch_failure_order(__order));
1864       }
1865 
1866 #if __cpp_lib_atomic_wait
1867       _GLIBCXX_ALWAYS_INLINE void
1868       wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
1869       { __atomic_impl::wait(_M_ptr, __old, __m); }
1870 
1871       // TODO add const volatile overload
1872 
1873       _GLIBCXX_ALWAYS_INLINE void
1874       notify_one() const noexcept
1875       { __atomic_impl::notify_one(_M_ptr); }
1876 
1877       // TODO add const volatile overload
1878 
1879       _GLIBCXX_ALWAYS_INLINE void
1880       notify_all() const noexcept
1881       { __atomic_impl::notify_all(_M_ptr); }
1882 
1883       // TODO add const volatile overload
1884 #endif // __cpp_lib_atomic_wait
1885 
1886       _GLIBCXX_ALWAYS_INLINE value_type
1887       fetch_add(difference_type __d,
1888 		memory_order __m = memory_order_seq_cst) const noexcept
1889       { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
1890 
1891       _GLIBCXX_ALWAYS_INLINE value_type
1892       fetch_sub(difference_type __d,
1893 		memory_order __m = memory_order_seq_cst) const noexcept
1894       { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
1895 
1896       value_type
1897       operator++(int) const noexcept
1898       { return fetch_add(1); }
1899 
1900       value_type
1901       operator--(int) const noexcept
1902       { return fetch_sub(1); }
1903 
1904       value_type
1905       operator++() const noexcept
1906       {
1907 	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
1908       }
1909 
1910       value_type
1911       operator--() const noexcept
1912       {
1913 	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
1914       }
1915 
1916       value_type
1917       operator+=(difference_type __d) const noexcept
1918       {
1919 	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
1920       }
1921 
1922       value_type
1923       operator-=(difference_type __d) const noexcept
1924       {
1925 	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
1926       }
1927 
1928     private:
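      // The __atomic built-ins do not scale pointer offsets by the element
      // size, so the element count __d is converted to a byte offset here.
      // is_object_v<_Tp> rejects pointee types such as void or function
      // types, for which sizeof would be ill-formed.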
1929       static constexpr ptrdiff_t
1930       _S_type_size(ptrdiff_t __d) noexcept
1931       {
1932 	static_assert(is_object_v<_Tp>);
1933 	return __d * sizeof(_Tp);
1934       }
1935 
1936       _Tp** _M_ptr;
1937     };
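
  // Illustrative use of the pointer specialization through the public
  // std::atomic_ref wrapper (a sketch, not part of this header):
  //
  //   int __buf[4] = { };
  //   int* __p = __buf;
  //   std::atomic_ref<int*> __ref(__p);
  //   __ref.fetch_add(2);  // advances __p by two elements, not two bytes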
1938 
1939 #endif // C++20
1940 
1941   /// @} group atomics
1942 
1943 _GLIBCXX_END_NAMESPACE_VERSION
1944 } // namespace std
1945 
1946 #endif
1947