/*	$NetBSD: evthread-internal.h,v 1.3 2017/01/31 23:17:39 christos Exp $	*/
/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVTHREAD_INTERNAL_H_INCLUDED_
#define EVTHREAD_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include "event2/thread.h"
#include "util-internal.h"

struct event_base;

#ifndef _WIN32
/* On Windows, the way we currently build DLLs does not allow us to export
 * shared global structures.  Thus, we only take the direct-access code path
 * (using the global callback structures below) when we know that the local
 * shared-library system supports it.
 */
#define EVTHREAD_EXPOSE_STRUCTS
#endif

#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
   enabled. */
extern struct evthread_lock_callbacks evthread_lock_fns_;
extern struct evthread_condition_callbacks evthread_cond_fns_;
extern unsigned long (*evthread_id_fn_)(void);
extern int evthread_lock_debugging_enabled_;

/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(evthread_id_fn_ ? evthread_id_fn_() : 1)

/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base)				 \
	(evthread_id_fn_ == NULL ||			 \
	(base)->th_owner_id == evthread_id_fn_())

/** Return true iff we need to notify the base's main thread about changes to
 * its state, because it's currently running the main loop in another
 * thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base)			 \
	(evthread_id_fn_ != NULL &&			 \
	    (base)->running_loop &&			 \
	    (base)->th_owner_id != evthread_id_fn_())
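
/* Usage sketch (illustrative only, not part of this header): code that
 * modifies an event_base from a possibly different thread typically holds
 * the base lock, makes its change, and then checks whether the loop thread
 * needs to be woken up.  The wake_up_loop() call below is a hypothetical
 * stand-in for whatever notification mechanism the caller uses.
 *
 *	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 *	// ... modify the base's pending events ...
 *	if (EVBASE_NEED_NOTIFY(base))
 *		wake_up_loop(base);	// hypothetical helper
 *	EVBASE_RELEASE_LOCK(base, th_base_lock);
 */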

/** Allocate a new lock, and store it in lockvar, a void*.  Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype)		\
	((lockvar) = evthread_lock_fns_.alloc ?		\
	    evthread_lock_fns_.alloc(locktype) : NULL)

/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype)				\
	do {								\
		void *lock_tmp_ = (lockvar);				\
		if (lock_tmp_ && evthread_lock_fns_.free)		\
			evthread_lock_fns_.free(lock_tmp_, (locktype)); \
	} while (/*CONSTCOND*/0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			evthread_lock_fns_.lock(mode, lockvar);		\
	} while (/*CONSTCOND*/0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			evthread_lock_fns_.unlock(mode, lockvar);	\
	} while (/*CONSTCOND*/0)
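
/* Usage sketch (illustrative only): a lock created with EVTHREAD_ALLOC_LOCK
 * is just a void *, and every operation below degrades to a no-op when no
 * locking callbacks have been installed.  'my_lock' is a hypothetical
 * variable, not something defined by this header.
 *
 *	void *my_lock = NULL;
 *	EVTHREAD_ALLOC_LOCK(my_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 *	EVLOCK_LOCK(my_lock, 0);
 *	// ... touch shared state ...
 *	EVLOCK_UNLOCK(my_lock, 0);
 *	EVTHREAD_FREE_LOCK(my_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 */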

/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2)				\
	do {								\
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) {	\
			void *tmp = lockvar1;				\
			lockvar1 = lockvar2;				\
			lockvar2 = tmp;					\
		}							\
	} while (/*CONSTCOND*/0)

/** Lock an event_base, if it is set up for locking.  Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do {				\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (/*CONSTCOND*/0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do {				\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (/*CONSTCOND*/0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && evthread_lock_debugging_enabled_) {	\
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		}							\
	} while (/*CONSTCOND*/0)
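
/* Usage sketch (illustrative only): functions that require their caller to
 * hold a lock commonly assert that precondition on entry; the check is only
 * active when lock debugging has been turned on (see
 * evthread_enable_lock_debugging()).  The helper below is hypothetical.
 *
 *	static void
 *	some_internal_helper(struct event_base *base)
 *	{
 *		EVLOCK_ASSERT_LOCKED(base->th_base_lock);
 *		// ... proceed, knowing the base lock is held ...
 *	}
 */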

/** Try to grab 'lock' without blocking, and return 1 if we manage to get
 * it. */
static inline int EVLOCK_TRY_LOCK_(void *lock);
static inline int
EVLOCK_TRY_LOCK_(void *lock)
{
	if (lock && evthread_lock_fns_.lock) {
		int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this lock;
		 * of course we count as having the lock. */
		return 1;
	}
}
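
/* Usage sketch (illustrative only): EVLOCK_TRY_LOCK_ suits code paths that
 * can simply skip optional work rather than block.  'maybe_lock' is a
 * hypothetical lock allocated with EVTHREAD_ALLOC_LOCK.
 *
 *	if (EVLOCK_TRY_LOCK_(maybe_lock)) {
 *		// ... do the optional work ...
 *		EVLOCK_UNLOCK(maybe_lock, 0);
 *	}
 */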

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar)					\
	do {								\
		(condvar) = evthread_cond_fns_.alloc_condition ?	\
		    evthread_cond_fns_.alloc_condition(0) : NULL;	\
	} while (/*CONSTCOND*/0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond)					\
	do {								\
		if (cond)						\
			evthread_cond_fns_.free_condition((cond));	\
	} while (/*CONSTCOND*/0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
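
/* Usage sketch (illustrative only): as with pthreads condition variables,
 * waits should sit in a loop that rechecks the predicate, with the
 * associated lock held around both the check and the wait.  'cond', 'lock',
 * and queue_is_empty() below are hypothetical.
 *
 *	// consumer
 *	EVLOCK_LOCK(lock, 0);
 *	while (queue_is_empty())
 *		EVTHREAD_COND_WAIT(cond, lock);
 *	// ... consume an item ...
 *	EVLOCK_UNLOCK(lock, 0);
 *
 *	// producer
 *	EVLOCK_LOCK(lock, 0);
 *	// ... enqueue an item ...
 *	EVTHREAD_COND_SIGNAL(cond);
 *	EVLOCK_UNLOCK(lock, 0);
 */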

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED()		\
	(evthread_lock_fns_.lock != NULL)

#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)

unsigned long evthreadimpl_get_id_(void);
int evthreadimpl_is_lock_debugging_enabled_(void);
void *evthreadimpl_lock_alloc_(unsigned locktype);
void evthreadimpl_lock_free_(void *lock, unsigned locktype);
int evthreadimpl_lock_lock_(unsigned mode, void *lock);
int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
void *evthreadimpl_cond_alloc_(unsigned condtype);
void evthreadimpl_cond_free_(void *cond);
int evthreadimpl_cond_signal_(void *cond, int broadcast);
int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
int evthreadimpl_locking_enabled_(void);

#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
#define EVBASE_IN_THREAD(base)				\
	((base)->th_owner_id == evthreadimpl_get_id_())
#define EVBASE_NEED_NOTIFY(base)			 \
	((base)->running_loop &&			 \
	    ((base)->th_owner_id != evthreadimpl_get_id_()))

#define EVTHREAD_ALLOC_LOCK(lockvar, locktype)		\
	((lockvar) = evthreadimpl_lock_alloc_(locktype))

#define EVTHREAD_FREE_LOCK(lockvar, locktype)				\
	do {								\
		void *lock_tmp_ = (lockvar);				\
		if (lock_tmp_)						\
			evthreadimpl_lock_free_(lock_tmp_, (locktype)); \
	} while (/*CONSTCOND*/0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			evthreadimpl_lock_lock_(mode, lockvar);		\
	} while (/*CONSTCOND*/0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode)					\
	do {								\
		if (lockvar)						\
			evthreadimpl_lock_unlock_(mode, lockvar);	\
	} while (/*CONSTCOND*/0)

/** Lock an event_base, if it is set up for locking.  Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do {				\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (/*CONSTCOND*/0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do {				\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (/*CONSTCOND*/0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		}							\
	} while (/*CONSTCOND*/0)

/** Try to grab 'lock' without blocking, and return 1 if we manage to get
 * it. */
static inline int EVLOCK_TRY_LOCK_(void *lock);
static inline int
EVLOCK_TRY_LOCK_(void *lock)
{
	if (lock) {
		int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this lock;
		 * of course we count as having the lock. */
		return 1;
	}
}

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar)					\
	do {								\
		(condvar) = evthreadimpl_cond_alloc_(0);		\
	} while (/*CONSTCOND*/0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond)					\
	do {								\
		if (cond)						\
			evthreadimpl_cond_free_((cond));		\
	} while (/*CONSTCOND*/0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )

#define EVTHREAD_LOCKING_ENABLED()		\
	(evthreadimpl_locking_enabled_())

#else /* EVENT__DISABLE_THREAD_SUPPORT */

#define EVTHREAD_GET_ID()	1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_

#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_

#define EVBASE_IN_THREAD(base)	1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_

#define EVLOCK_TRY_LOCK_(lock) 1

#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_

#define EVTHREAD_LOCKING_ENABLED() 0

#endif

/* This code is shared between both lock impls */
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2)				\
	do {								\
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) {	\
			void *tmp = lockvar1;				\
			lockvar1 = lockvar2;				\
			lockvar2 = tmp;					\
		}							\
	} while (/*CONSTCOND*/0)

/** Acquire both lock1 and lock2.  Always acquires the locks in the same
 * order, so that two threads locking two locks with LOCK2 will not
 * deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *lock1_tmplock_ = (lock1);				\
		void *lock2_tmplock_ = (lock2);				\
		EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_);	\
		EVLOCK_LOCK(lock1_tmplock_,mode1);			\
		if (lock2_tmplock_ != lock1_tmplock_)			\
			EVLOCK_LOCK(lock2_tmplock_,mode2);		\
	} while (/*CONSTCOND*/0)
/** Release both lock1 and lock2.  */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *lock1_tmplock_ = (lock1);				\
		void *lock2_tmplock_ = (lock2);				\
		EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_);	\
		if (lock2_tmplock_ != lock1_tmplock_)			\
			EVLOCK_UNLOCK(lock2_tmplock_,mode2);		\
		EVLOCK_UNLOCK(lock1_tmplock_,mode1);			\
	} while (/*CONSTCOND*/0)
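
/* Usage sketch (illustrative only): because EVLOCK_LOCK2 sorts the two
 * pointers before locking, two threads may pass the locks in either order
 * without risking deadlock.  'lock_a' and 'lock_b' are hypothetical locks.
 *
 *	// Thread 1:
 *	EVLOCK_LOCK2(lock_a, lock_b, 0, 0);
 *	// ... work on both objects ...
 *	EVLOCK_UNLOCK2(lock_a, lock_b, 0, 0);
 *
 *	// Thread 2 (reversed argument order is still safe):
 *	EVLOCK_LOCK2(lock_b, lock_a, 0, 0);
 *	// ...
 *	EVLOCK_UNLOCK2(lock_b, lock_a, 0, 0);
 */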

int evthread_is_debug_lock_held_(void *lock);
void *evthread_debug_get_real_lock_(void *lock);

void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
    int enable_locks);

#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)			\
	do {								\
		lockvar = evthread_setup_global_lock_(lockvar,		\
		    (locktype), enable_locks);				\
		if (!lockvar) {						\
			event_warn("Couldn't allocate %s", #lockvar);	\
			return -1;					\
		}							\
	} while (/*CONSTCOND*/0);
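
/* Usage sketch (illustrative only): the macro expects an 'enable_locks'
 * variable in scope and returns -1 from the enclosing function on failure,
 * so it is intended for use inside the *_global_setup_locks_() helpers
 * declared below.  'example_lock' and example_global_setup_locks_() are
 * hypothetical, not something this header defines.
 *
 *	static void *example_lock = NULL;
 *
 *	int
 *	example_global_setup_locks_(const int enable_locks)
 *	{
 *		EVTHREAD_SETUP_GLOBAL_LOCK(example_lock, 0);
 *		return 0;
 *	}
 */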

int event_global_setup_locks_(const int enable_locks);
int evsig_global_setup_locks_(const int enable_locks);
int evutil_global_setup_locks_(const int enable_locks);
int evutil_secure_rng_global_setup_locks_(const int enable_locks);

/** Return current evthread_lock_callbacks */
struct evthread_lock_callbacks *evthread_get_lock_callbacks(void);
/** Return current evthread_condition_callbacks */
struct evthread_condition_callbacks *evthread_get_condition_callbacks(void);
/** Disable locking for internal usage (like global shutdown) */
void evthreadimpl_disable_lock_debugging_(void);

#endif

#ifdef __cplusplus
}
#endif

#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */