// -*- C++ -*- header.

// Copyright (C) 2008-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
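
  // Illustrative sketch (not part of this header): the modifier bits live in
  // the upper half-word and can be OR-ed into a memory_order with the
  // operator| defined below.  This is how GCC exposes hardware lock elision
  // (HLE); the example assumes a TSX-capable x86 target.
  //
  //   #include <atomic>
  //   std::atomic<int> __lock(0);
  //   // Elided acquire: the base order is later recovered by masking with
  //   // __memory_order_mask.
  //   int __prev = __lock.exchange(1, std::memory_order_acquire
  //                                   | std::__memory_order_hle_acquire);
  //   // ... critical section ...
  //   __lock.store(0, std::memory_order_release
  //                   | std::__memory_order_hle_release);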

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
			| (__m & __memory_order_modifier_mask));
  }
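
  // Illustrative check of the mapping above (a sketch, not part of this
  // header): a success order of acq_rel yields an acquire failure order,
  // release yields relaxed, and everything else is passed through.
  //
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
  //                 == std::memory_order_acquire, "");
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_release)
  //                 == std::memory_order_relaxed, "");
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_seq_cst)
  //                 == std::memory_order_seq_cst, "");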

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(__m); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(__m); }
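
  // Usage sketch (not part of this header): fences let relaxed atomic
  // accesses carry acquire/release semantics.  Assumes two threads handing
  // off a plain payload through an atomic flag.
  //
  //   #include <atomic>
  //   int payload;                                  // non-atomic data
  //   std::atomic<bool> ready(false);
  //
  //   // Producer thread:
  //   payload = 42;
  //   std::atomic_thread_fence(std::memory_order_release);
  //   ready.store(true, std::memory_order_relaxed);
  //
  //   // Consumer thread:
  //   while (!ready.load(std::memory_order_relaxed)) { }
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   int __v = payload;                            // guaranteed to see 42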

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
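
  // Usage sketch (not part of this header): kill_dependency terminates the
  // dependency chain started by a memory_order_consume load, so later uses
  // of its result are no longer dependency-ordered before the load.
  //
  //   std::atomic<int*> __p;
  //   int* __q = __p.load(std::memory_order_consume);
  //   int __a = *__q;                        // carries a dependency on the load
  //   int __b = std::kill_dependency(*__q);  // __b does not carry a dependency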


  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;


#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
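
  // Usage sketch (not part of this header): atomic_flag is the minimal,
  // always lock-free primitive, so it is the natural building block for a
  // simple spinlock.  Assumes busy-waiting is acceptable for the caller.
  //
  //   #include <atomic>
  //   struct __spin_lock
  //   {
  //     std::atomic_flag _M_flag = ATOMIC_FLAG_INIT;
  //
  //     void lock() noexcept
  //     { while (_M_flag.test_and_set(std::memory_order_acquire)) { } }
  //
  //     void unlock() noexcept
  //     { _M_flag.clear(std::memory_order_release); }
  //   };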


  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-__alignof(_M_i)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-__alignof(_M_i)));
      }
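
      // Usage sketch (not part of this header): is_lock_free reports whether
      // operations on this object avoid a library-side lock.  The per-type
      // answer is also exposed at compile time via the ATOMIC_*_LOCK_FREE
      // macros from <bits/atomic_lockfree_defines.h>.
      //
      //   std::atomic<long long> __x(0);
      //   if (__x.is_lock_free())
      //     { /* no mutex or spinlock fallback on this target */ }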

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, __m);
      }


      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;

	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }
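
      // Usage sketch (not part of this header): the canonical
      // compare-exchange retry loop, here computing an atomic maximum.
      // On failure the expected argument is updated with the value actually
      // observed, so the loop simply retries with it.
      //
      //   void __atomic_max(std::atomic<int>& __a, int __v)
      //   {
      //     int __cur = __a.load(std::memory_order_relaxed);
      //     while (__cur < __v
      //            && !__a.compare_exchange_weak(__cur, __v,
      //                                          std::memory_order_release,
      //                                          std::memory_order_relaxed))
      //       { /* __cur now holds the freshly observed value; retry */ }
      //   }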

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }
    };
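
  // Usage sketch (not part of this header): the integral operations above as
  // seen through std::atomic<unsigned>.  The operator forms always use
  // memory_order_seq_cst; the named fetch_* members take an explicit order.
  //
  //   #include <atomic>
  //   std::atomic<unsigned> __counter(0);
  //   ++__counter;                                        // seq_cst add_fetch
  //   __counter.fetch_add(4, std::memory_order_relaxed);  // relaxed RMW
  //   __counter |= 0x80u;                                 // seq_cst or_fetch
  //   unsigned __old = __counter.exchange(0);             // returns old value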


  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  memory_order_seq_cst); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  memory_order_seq_cst); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  memory_order_seq_cst); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b = __m & __memory_order_mask;

	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, __m);
      }


      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;
	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	memory_order __b2 = __m2 & __memory_order_mask;
	memory_order __b1 = __m1 & __memory_order_mask;

	__glibcxx_assert(__b2 != memory_order_release);
	__glibcxx_assert(__b2 != memory_order_acq_rel);
	__glibcxx_assert(__b2 <= __b1);

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }
    };
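
  // Usage sketch (not part of this header): arithmetic on an atomic pointer
  // is scaled by the element size via _M_type_size, exactly like built-in
  // pointer arithmetic.
  //
  //   #include <atomic>
  //   int __buf[8];
  //   std::atomic<int*> __p(__buf);
  //   int* __prev = __p.fetch_add(2);  // __prev == __buf, __p == __buf + 2
  //   --__p;                           // __p == __buf + 1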

  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif