/*	$NetBSD: epoch.h,v 1.2 2021/08/14 16:14:58 christos Exp $	*/

/* epoch.h - epoch based memory reclamation */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2018-2021 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

#ifndef __LLOAD_EPOCH_H
#define __LLOAD_EPOCH_H

#include <stdint.h>	/* uintptr_t; assuming no earlier include provides it */

/** @file epoch.h
 *
 * Implementation of epoch based memory reclamation, in principle
 * similar to the algorithm presented in
 * https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
 */

typedef uintptr_t epoch_t;

/** @brief A callback function used to free an object and its associated data */
typedef void (dispose_cb)( void *object );

/** @brief Initialise global state */
void epoch_init( void );

/** @brief Finalise global state and free any objects still pending */
void epoch_shutdown( void );

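/* A minimal lifecycle sketch, assuming single-threaded setup and
 * teardown; start_worker_threads()/stop_worker_threads() are
 * hypothetical placeholders for the caller's own code. Threads started
 * in between use epoch_join()/epoch_leave(); none may remain active
 * once epoch_shutdown() is called.
 *
 *     epoch_init();
 *     start_worker_threads();
 *     ...
 *     stop_worker_threads();
 *     epoch_shutdown();
 */
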
/** @brief Register thread as active
 *
 * In order to safely access managed objects, a thread should call
 * this function or make sure no other thread is running (e.g. during
 * a config pause or late in shutdown). After calling this, it is
 * guaranteed that no reachable objects will be freed before all
 * threads have called `epoch_leave( current_epoch + 1 )`, so it is
 * essential that there is an upper limit on the time between
 * #epoch_join() and the corresponding #epoch_leave(), or the number
 * of unfreed objects might grow without bound.
 *
 * To simplify locking, memory is only freed when the current epoch
 * is advanced, rather than on leaving it.
 *
 * This function can safely be called multiple times by the same
 * thread as long as a matching #epoch_leave() call is eventually
 * made.
 *
 * @return The observed epoch, to be passed to #epoch_leave()
 */
epoch_t epoch_join( void );

/** @brief Register thread as inactive
 *
 * A thread should call this once it is finished with the work
 * performed since the matching call to #epoch_join(). It is not safe
 * to keep a local reference to managed objects after this call
 * unless other precautions have been taken to prevent them from
 * being released.
 *
 * @param[in] epoch Epoch identifier returned by a previous call to
 * #epoch_join().
 */
void epoch_leave( epoch_t epoch );

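/* A usage sketch bracketing access to a managed object; the
 * `connection` type and connection_lookup() are hypothetical
 * caller-side names, only epoch_join()/epoch_leave() come from this
 * header:
 *
 *     epoch_t epoch = epoch_join();
 *     connection *c = connection_lookup( id );
 *     if ( c ) {
 *         ... c cannot be freed until after epoch_leave() ...
 *     }
 *     epoch_leave( epoch );
 */
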
/** @brief Return an unreachable object to be freed
 *
 * The object should already be unreachable at the point of the call;
 * cb will be invoked once no thread that could have seen the object
 * is active any more, which is the case when the current epoch has
 * advanced by two.
 *
 * @param[in] ptr Object to be released/freed
 * @param[in] cb Callback to invoke when safe to do so
 */
void epoch_append( void *ptr, dispose_cb *cb );

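/* A sketch of retiring an object: unlink it first so no new readers
 * can find it, then hand it over for deferred disposal. The
 * `connection` type, connection_unlink() and connection_destroy()
 * are hypothetical caller-side names:
 *
 *     static void
 *     connection_destroy( void *arg )
 *     {
 *         free( arg );
 *     }
 *
 *     connection_unlink( c );
 *     epoch_append( c, connection_destroy );
 */
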
/**
 * \defgroup refcount Reference counting helpers
 */
/**@{*/

/** @brief Acquire a reference if possible
 *
 * Atomically check that the reference count is non-zero and, if so,
 * increment it. Returns the old reference count.
 *
 * @param[in] refp Pointer to the reference counter
 * @return 0 if the reference count was already zero, non-zero if it
 * was successfully incremented
 */
int acquire_ref( uintptr_t *refp );

/** @brief Check reference count and try to decrement
 *
 * Atomically decrement the reference count if it is non-zero, and
 * register the object for disposal if it drops to zero. Returns the
 * previous reference count.
 *
 * @param[in] refp Pointer to the reference counter
 * @param[in] object The managed object
 * @param[in] cb Callback to invoke when safe to do so
 * @return 0 if the reference count was already zero, non-zero if it
 * was non-zero at the time of the call
 */
int try_release_ref( uintptr_t *refp, void *object, dispose_cb *cb );

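/* A sketch of keeping an object alive past epoch_leave() by taking a
 * reference inside the protected section; the `connection` type with
 * a uintptr_t `refcnt` member, connection_lookup() and
 * connection_destroy() are hypothetical caller-side names:
 *
 *     epoch_t epoch = epoch_join();
 *     connection *c = connection_lookup( id );
 *     int alive = c && acquire_ref( &c->refcnt );
 *     epoch_leave( epoch );
 *
 *     if ( alive ) {
 *         ... c remains valid while we hold the reference ...
 *         try_release_ref( &c->refcnt, c, connection_destroy );
 *     }
 */
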
/** @brief Read reference count
 *
 * @param[in] object Pointer to the managed object
 * @param[in] ref_field Member of the object where the reference
 * count is stored
 * @return Current value of the reference counter
 */
#define IS_ALIVE( object, ref_field ) \
    __atomic_load_n( &(object)->ref_field, __ATOMIC_ACQUIRE )

/** @brief Release reference
 *
 * A cheaper alternative to #try_release_ref(), safe only when the
 * reference count is known to be non-zero.
 *
 * @param[in] object The managed object
 * @param[in] ref_field Member of the object where the reference
 * count is stored
 * @param[in] cb Callback to invoke when safe to do so
 */
#define RELEASE_REF( object, ref_field, cb ) \
    do { \
        assert( IS_ALIVE( (object), ref_field ) ); \
        if ( !__atomic_sub_fetch( \
                     &(object)->ref_field, 1, __ATOMIC_ACQ_REL ) ) { \
            epoch_append( object, (dispose_cb *)cb ); \
        } \
    } while (0)

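/* A sketch pairing the two macros; assumes the hypothetical
 * `connection` object above, whose `refcnt` member counts references
 * the caller already holds (so the count cannot be zero here):
 *
 *     assert( IS_ALIVE( c, refcnt ) );
 *     ... use c ...
 *     RELEASE_REF( c, refcnt, connection_destroy );
 */
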
/**@}*/

#endif /* __LLOAD_EPOCH_H */