// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H
#define _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H

#include <__cxx03/__assert>
#include <__cxx03/__config>
#include <__cxx03/__stop_token/atomic_unique_lock.h>
#include <__cxx03/__stop_token/intrusive_list_view.h>
#include <__cxx03/__thread/id.h>
#include <__cxx03/atomic>
#include <__cxx03/cstdint>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)

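// A __stop_callback_base is a node in the intrusive list of callbacks registered with a
// __stop_state. __completed_ and __destroyed_ coordinate callback invocation with callback
// destruction: __request_stop invokes a callback with the list unlocked, publishes
// __completed_ (release) once the invocation returns, and checks the local flag pointed to
// by __destroyed_ to detect a callback that destroyed itself from within its own invocation
// (see __request_stop and __remove_callback below).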
struct __stop_callback_base : __intrusive_node_base<__stop_callback_base> {
  using __callback_fn_t = void(__stop_callback_base*) noexcept;
  _LIBCPP_HIDE_FROM_ABI explicit __stop_callback_base(__callback_fn_t* __callback_fn) : __callback_fn_(__callback_fn) {}

  _LIBCPP_HIDE_FROM_ABI void __invoke() noexcept { __callback_fn_(this); }

  __callback_fn_t* __callback_fn_;
  atomic<bool> __completed_ = false;
  bool* __destroyed_        = nullptr;
};

class __stop_state {
  static constexpr uint32_t __stop_requested_bit        = 1;
  static constexpr uint32_t __callback_list_locked_bit  = 1 << 1;
  static constexpr uint32_t __stop_source_counter_shift = 2;

  // The "stop_source counter" is not used for lifetime reference counting.
  // When the number of stop_sources reaches 0, stop_possible on the remaining
  // stop_tokens returns false. We need this counter to track that.
  //
  // The "callback list locked" bit implements the atomic_unique_lock used to
  // guard the operations on the callback list.
  //
  //       31 - 2          |  1                   |    0           |
  //  stop_source counter  | callback list locked | stop_requested |
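  //
  // For example, with two associated stop_source objects, no stop request, and the list
  // unlocked, __state_ holds (2 << __stop_source_counter_shift) == 0b1000; once a stop
  // has been requested, the stop_requested bit is set as well: 0b1001.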
  atomic<uint32_t> __state_ = 0;

  // Reference count for stop_token + stop_callback + stop_source.
  // When the counter reaches zero, the state is destroyed.
  // It is used by __intrusive_shared_ptr, but it is stored here for better layout.
  atomic<uint32_t> __ref_count_ = 0;

  using __state_t            = uint32_t;
  using __callback_list_lock = __atomic_unique_lock<__state_t, __callback_list_locked_bit>;
  using __callback_list      = __intrusive_list_view<__stop_callback_base>;

  __callback_list __callback_list_;
  __thread_id __requesting_thread_;

public:
  _LIBCPP_HIDE_FROM_ABI __stop_state() noexcept = default;

  _LIBCPP_HIDE_FROM_ABI void __increment_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) <= static_cast<__state_t>(~(1 << __stop_source_counter_shift)),
        "stop_source's counter has reached the maximum. Incrementing the counter will overflow");
    __state_.fetch_add(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  // We are not destroying the object after the counter decrements to zero, nor do we have
  // operations that depend on the ordering of decrementing the counter. relaxed is enough.
  _LIBCPP_HIDE_FROM_ABI void __decrement_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) >= static_cast<__state_t>(1 << __stop_source_counter_shift),
        "stop_source's counter is 0. Decrementing the counter will underflow");
    __state_.fetch_sub(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  _LIBCPP_HIDE_FROM_ABI bool __stop_requested() const noexcept {
    // acquire because [thread.stoptoken.intro] A call to request_stop that returns true
    // synchronizes with a call to stop_requested on an associated stop_token or stop_source
    // object that returns true.
    // request_stop's compare_exchange_weak has release semantics, which sync with this acquire
    return (__state_.load(std::memory_order_acquire) & __stop_requested_bit) != 0;
  }

  _LIBCPP_HIDE_FROM_ABI bool __stop_possible_for_stop_token() const noexcept {
    // [stoptoken.mem] false if "a stop request was not made and there are no associated stop_source objects"
    // TODO: Can this be std::memory_order_relaxed, since the standard does not require anything beyond not
    // introducing a data race?
    __state_t __current_state = __state_.load(std::memory_order_acquire);
    return ((__current_state & __stop_requested_bit) != 0) || ((__current_state >> __stop_source_counter_shift) != 0);
  }

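  // Sets the stop_requested bit. Returns false if a stop had already been requested;
  // otherwise invokes every registered callback on the calling thread, one at a time,
  // unlocking the list around each invocation, and returns true.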
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __request_stop() noexcept {
    auto __cb_list_lock = __try_lock_for_request_stop();
    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }
    __requesting_thread_ = this_thread::get_id();

    while (!__callback_list_.__empty()) {
      auto __cb = __callback_list_.__pop_front();

      // allow other callbacks to be removed while invoking the current callback
      __cb_list_lock.__unlock();

      bool __destroyed   = false;
      __cb->__destroyed_ = &__destroyed;

      __cb->__invoke();

      // Invoking __cb could potentially delete __cb itself. We need to check before accessing __cb's members
      if (!__destroyed) {
        // We need to set the __destroyed_ pointer back to nullptr; otherwise it would point to a local
        // variable that is destroyed at the end of this loop iteration
        __cb->__destroyed_ = nullptr;

        // [stopcallback.cons] If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.
        // This release syncs with the acquire in __remove_callback
        __cb->__completed_.store(true, std::memory_order_release);
        __cb->__completed_.notify_all();
      }

      __cb_list_lock.__lock();
    }

    return true;
  }

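  // Registers __cb with this state. Returns true if the callback was added to the list
  // (a later request_stop will invoke it). Returns false if nothing was registered:
  // either a stop was already requested, in which case __cb is invoked synchronously on
  // this thread before returning, or no stop_source remains, in which case __cb can
  // never be invoked and is simply dropped.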
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __add_callback(__stop_callback_base* __cb) noexcept {
    // Give up trying to lock the list if a stop was already requested (run the callback
    // synchronously) or if no stop_source remains.
    const auto __give_up_trying_to_lock_condition = [__cb](__state_t __state) {
      if ((__state & __stop_requested_bit) != 0) {
        // stop already requested: run the callback synchronously, no need to lock the list
        __cb->__invoke();
        return true;
      }
      // no stop_source left: no need to lock the list to add the callback, as it can never be invoked
      return (__state >> __stop_source_counter_shift) == 0;
    };

    __callback_list_lock __cb_list_lock(__state_, __give_up_trying_to_lock_condition);

    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }

    __callback_list_.__push_front(__cb);

    return true;
    // unlock here: [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of
    // that callback.
    // Note: this release syncs with the acquire in request_stop's __try_lock_for_request_stop
  }

  // called by the destructor of stop_callback
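  // If request_stop has already popped __cb off the list and may be invoking it right now,
  // this either waits for the invocation to finish (when called from another thread) or,
  // when the callback is destroying itself from within its own invocation, sets the
  // __destroyed_ flag so that request_stop will not touch the node again.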
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __remove_callback(__stop_callback_base* __cb) noexcept {
    __callback_list_lock __cb_list_lock(__state_);

    // Under the condition below, the request_stop call has just popped __cb from the list and could be executing it now
    bool __potentially_executing_now = __cb->__prev_ == nullptr && !__callback_list_.__is_head(__cb);

    if (__potentially_executing_now) {
      auto __requested_thread = __requesting_thread_;
      __cb_list_lock.__unlock();

      if (std::this_thread::get_id() != __requested_thread) {
        // [stopcallback.cons] If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.
        __cb->__completed_.wait(false, std::memory_order_acquire);
      } else {
        // The destructor of stop_callback runs on the same thread as the thread that invokes the callback.
        // The callback is potentially invoking its own destructor. Set the flag to avoid accessing destroyed
        // members on the invoking side
        if (__cb->__destroyed_) {
          *__cb->__destroyed_ = true;
        }
      }
    } else {
      __callback_list_.__remove(__cb);
    }
  }

private:
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI __callback_list_lock __try_lock_for_request_stop() noexcept {
    // If it is already stop_requested, do not try to request stop or lock the list again.
    const auto __lock_fail_condition = [](__state_t __state) { return (__state & __stop_requested_bit) != 0; };

    // set the locked and requested bits at the same time
    const auto __after_lock_state = [](__state_t __state) {
      return __state | __callback_list_locked_bit | __stop_requested_bit;
    };

    // acq because [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of that
    //     callback. We are going to invoke the callbacks after getting the lock; acquire so that we can see the
    //     registration of a callback (and other writes that happen-before the add_callback)
    //     Note: the rel (unlock) in the add_callback syncs with this acq
    // rel because [thread.stoptoken.intro] A call to request_stop that returns true synchronizes with a call
    //     to stop_requested on an associated stop_token or stop_source object that returns true.
    //     We need to make sure that all writes (including user code) before request_stop will be made visible
    //     to the threads that are waiting for `stop_requested == true`
    //     Note: this rel syncs with the acq in `stop_requested`
    const auto __locked_ordering = std::memory_order_acq_rel;

    return __callback_list_lock(__state_, __lock_fail_condition, __after_lock_state, __locked_ordering);
  }

  template <class _Tp>
  friend struct __intrusive_shared_ptr_traits;
};
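
// Rough usage sketch (for illustration only; the actual stop_source / stop_token /
// stop_callback wrappers are defined elsewhere and share one __stop_state through
// __intrusive_shared_ptr):
//
//   stop_source construction/copy:  __state->__increment_stop_source_counter();
//   stop_source destruction:        __state->__decrement_stop_source_counter();
//   stop_source::request_stop():    return __state->__request_stop();
//   stop_token::stop_requested():   return __state->__stop_requested();
//   stop_token::stop_possible():    return __state->__stop_possible_for_stop_token();
//   stop_callback construction:     __state->__add_callback(this);
//   stop_callback destruction:      __state->__remove_callback(this);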

template <class _Tp>
struct __intrusive_shared_ptr_traits;

template <>
struct __intrusive_shared_ptr_traits<__stop_state> {
  _LIBCPP_HIDE_FROM_ABI static atomic<uint32_t>& __get_atomic_ref_count(__stop_state& __state) {
    return __state.__ref_count_;
  }
};

#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H