xref: /netbsd-src/external/bsd/libevent/dist/evthread.c (revision 7e68cdd7306a8b6c32d6a32c16ba01e5a2ddc083)
1 /*	$NetBSD: evthread.c,v 1.3 2021/04/07 03:36:48 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "event2/event-config.h"
30 #include <sys/cdefs.h>
31 __RCSID("$NetBSD: evthread.c,v 1.3 2021/04/07 03:36:48 christos Exp $");
32 #include "evconfig-private.h"
33 
34 #ifndef EVENT__DISABLE_THREAD_SUPPORT
35 
36 #include "event2/thread.h"
37 
38 #include <stdlib.h>
39 #include <string.h>
40 
41 #include "log-internal.h"
42 #include "mm-internal.h"
43 #include "util-internal.h"
44 #include "evthread-internal.h"
45 
46 #ifdef EVTHREAD_EXPOSE_STRUCTS
47 #define GLOBAL
48 #else
49 #define GLOBAL static
50 #endif
51 
52 #ifndef EVENT__DISABLE_DEBUG_MODE
53 extern int event_debug_created_threadable_ctx_;
54 extern int event_debug_mode_on_;
55 #endif
56 
/* globals */
/* Nonzero iff lock debugging (the debug_lock wrappers below) is active. */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
/* The lock implementation currently in use; all-NULL means "no locking". */
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
/* Callback that returns an id for the calling thread; NULL until set. */
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
/* The condition-variable implementation currently in use. */
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
/* While lock debugging is enabled, the user-supplied ("real")
 * implementations are stashed here and the evthread_*_fns_ tables above
 * hold the debug wrappers instead. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};
74 
75 void
evthread_set_id_callback(unsigned long (* id_fn)(void))76 evthread_set_id_callback(unsigned long (*id_fn)(void))
77 {
78 	evthread_id_fn_ = id_fn;
79 }
80 
evthread_get_lock_callbacks()81 struct evthread_lock_callbacks *evthread_get_lock_callbacks()
82 {
83 	return evthread_lock_debugging_enabled_
84 	    ? &original_lock_fns_ : &evthread_lock_fns_;
85 }
evthread_get_condition_callbacks()86 struct evthread_condition_callbacks *evthread_get_condition_callbacks()
87 {
88 	return evthread_lock_debugging_enabled_
89 	    ? &original_cond_fns_ : &evthread_cond_fns_;
90 }
evthreadimpl_disable_lock_debugging_(void)91 void evthreadimpl_disable_lock_debugging_(void)
92 {
93 	evthread_lock_debugging_enabled_ = 0;
94 }
95 
/**
 * Install (or, with cbs == NULL, clear) the application's lock callbacks.
 *
 * Returns 0 on success; -1 if a different set of callbacks was already
 * installed or if `cbs` does not provide all four functions.
 */
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	/* While debugging is on, write to the stashed "real" table rather
	 * than the active one, which holds the debug wrappers. */
	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		/* NULL means "disable locking". */
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up.*/
		if (target->lock_api_version == cbs->lock_api_version &&
			target->supported_locktypes == cbs->supported_locktypes &&
			target->alloc == cbs->alloc &&
			target->free == cbs->free &&
			target->lock == cbs->lock &&
			target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	/* All four operations must be provided together. */
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}
138 
/**
 * Install (or, with cbs == NULL, clear) the application's
 * condition-variable callbacks.
 *
 * Returns 0 on success; -1 if a different set of callbacks was already
 * installed.  Note that, unlike evthread_set_lock_callbacks(), an
 * incomplete `cbs` is not reported as an error: it is simply not copied.
 */
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	/* While debugging is on, write to the stashed "real" table rather
	 * than the active one, which holds the debug wrappers. */
	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		/* NULL means "disable condition support". */
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up.*/
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		/* Keep the debug wait wrapper in the active table; the other
		 * operations can call the new callbacks directly. */
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}
185 
#define DEBUG_LOCK_SIG	0xdeb0b10c

/* Wrapper placed around every lock while lock debugging is enabled. */
struct debug_lock {
	unsigned signature;	/* Always DEBUG_LOCK_SIG while the lock is live;
				 * checked by the debug_lock_* functions. */
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags given at creation. */
	unsigned long held_by;	/* Thread id of the current holder, or 0
				 * (only maintained when evthread_id_fn_ is set). */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* Current acquisition depth (recursive locks). */
	void *lock;		/* Underlying real lock, or NULL if locking is off. */
};
197 
198 static void *
debug_lock_alloc(unsigned locktype)199 debug_lock_alloc(unsigned locktype)
200 {
201 	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
202 	if (!result)
203 		return NULL;
204 	if (original_lock_fns_.alloc) {
205 		if (!(result->lock = original_lock_fns_.alloc(
206 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
207 			mm_free(result);
208 			return NULL;
209 		}
210 	} else {
211 		result->lock = NULL;
212 	}
213 	result->signature = DEBUG_LOCK_SIG;
214 	result->locktype = locktype;
215 	result->count = 0;
216 	result->held_by = 0;
217 	return result;
218 }
219 
220 static void
debug_lock_free(void * lock_,unsigned locktype)221 debug_lock_free(void *lock_, unsigned locktype)
222 {
223 	struct debug_lock *lock = lock_;
224 	EVUTIL_ASSERT(lock->count == 0);
225 	EVUTIL_ASSERT(locktype == lock->locktype);
226 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
227 	if (original_lock_fns_.free) {
228 		original_lock_fns_.free(lock->lock,
229 		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
230 	}
231 	lock->lock = NULL;
232 	lock->count = -100;
233 	lock->signature = 0x12300fda;
234 	mm_free(lock);
235 }
236 
237 static void
evthread_debug_lock_mark_locked(unsigned mode,struct debug_lock * lock)238 evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
239 {
240 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
241 	++lock->count;
242 	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
243 		EVUTIL_ASSERT(lock->count == 1);
244 	if (evthread_id_fn_) {
245 		unsigned long me;
246 		me = evthread_id_fn_();
247 		if (lock->count > 1)
248 			EVUTIL_ASSERT(lock->held_by == me);
249 		lock->held_by = me;
250 	}
251 }
252 
253 static int
debug_lock_lock(unsigned mode,void * lock_)254 debug_lock_lock(unsigned mode, void *lock_)
255 {
256 	struct debug_lock *lock = lock_;
257 	int res = 0;
258 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
259 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
260 	else
261 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
262 	if (original_lock_fns_.lock)
263 		res = original_lock_fns_.lock(mode, lock->lock);
264 	if (!res) {
265 		evthread_debug_lock_mark_locked(mode, lock);
266 	}
267 	return res;
268 }
269 
270 static void
evthread_debug_lock_mark_unlocked(unsigned mode,struct debug_lock * lock)271 evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
272 {
273 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
274 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
275 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
276 	else
277 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
278 	if (evthread_id_fn_) {
279 		unsigned long me;
280 		me = evthread_id_fn_();
281 		EVUTIL_ASSERT(lock->held_by == me);
282 		if (lock->count == 1)
283 			lock->held_by = 0;
284 	}
285 	--lock->count;
286 	EVUTIL_ASSERT(lock->count >= 0);
287 }
288 
289 static int
debug_lock_unlock(unsigned mode,void * lock_)290 debug_lock_unlock(unsigned mode, void *lock_)
291 {
292 	struct debug_lock *lock = lock_;
293 	int res = 0;
294 	evthread_debug_lock_mark_unlocked(mode, lock);
295 	if (original_lock_fns_.unlock)
296 		res = original_lock_fns_.unlock(mode, lock->lock);
297 	return res;
298 }
299 
/* Debug wrapper for waiting on a condition.  The caller must hold the
 * debug lock; since the real wait releases the underlying lock, the
 * held/count bookkeeping is cleared before the wait and restored after. */
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}
313 
/* misspelled version for backward compatibility */
/* NOTE: kept deliberately; this spelling is part of the public API. */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}
320 
/**
 * Turn on lock debugging.
 *
 * Stashes the currently installed lock and condition callbacks in
 * original_lock_fns_/original_cond_fns_, installs the debug_lock_*
 * wrappers (and debug_cond_wait) in their place, and re-runs global
 * lock setup so the library's global locks use the wrappers.  A second
 * call is a no-op.
 */
void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	/* Only the wait operation needs wrapping; the rest pass through. */
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}
347 
348 int
evthread_is_debug_lock_held_(void * lock_)349 evthread_is_debug_lock_held_(void *lock_)
350 {
351 	struct debug_lock *lock = lock_;
352 	if (! lock->count)
353 		return 0;
354 	if (evthread_id_fn_) {
355 		unsigned long me = evthread_id_fn_();
356 		if (lock->held_by != me)
357 			return 0;
358 	}
359 	return 1;
360 }
361 
362 void *
evthread_debug_get_real_lock_(void * lock_)363 evthread_debug_get_real_lock_(void *lock_)
364 {
365 	struct debug_lock *lock = lock_;
366 	return lock->lock;
367 }
368 
369 void *
evthread_setup_global_lock_(void * lock_,unsigned locktype,int enable_locks)370 evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
371 {
372 	/* there are four cases here:
373 	   1) we're turning on debugging; locking is not on.
374 	   2) we're turning on debugging; locking is on.
375 	   3) we're turning on locking; debugging is not on.
376 	   4) we're turning on locking; debugging is on. */
377 
378 	if (!enable_locks && original_lock_fns_.alloc == NULL) {
379 		/* Case 1: allocate a debug lock. */
380 		EVUTIL_ASSERT(lock_ == NULL);
381 		return debug_lock_alloc(locktype);
382 	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
383 		/* Case 2: wrap the lock in a debug lock. */
384 		struct debug_lock *lock;
385 		EVUTIL_ASSERT(lock_ != NULL);
386 
387 		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
388 			/* We can't wrap it: We need a recursive lock */
389 			original_lock_fns_.free(lock_, locktype);
390 			return debug_lock_alloc(locktype);
391 		}
392 		lock = mm_malloc(sizeof(struct debug_lock));
393 		if (!lock) {
394 			original_lock_fns_.free(lock_, locktype);
395 			return NULL;
396 		}
397 		lock->lock = lock_;
398 		lock->locktype = locktype;
399 		lock->count = 0;
400 		lock->held_by = 0;
401 		return lock;
402 	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
403 		/* Case 3: allocate a regular lock */
404 		EVUTIL_ASSERT(lock_ == NULL);
405 		return evthread_lock_fns_.alloc(locktype);
406 	} else {
407 		/* Case 4: Fill in a debug lock with a real lock */
408 		struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
409 		EVUTIL_ASSERT(enable_locks &&
410 		              evthread_lock_debugging_enabled_);
411 		EVUTIL_ASSERT(lock->locktype == locktype);
412 		if (!lock->lock) {
413 			lock->lock = original_lock_fns_.alloc(
414 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
415 			if (!lock->lock) {
416 				lock->count = -200;
417 				mm_free(lock);
418 				return NULL;
419 			}
420 		}
421 		return lock;
422 	}
423 }
424 
425 
426 #ifndef EVTHREAD_EXPOSE_STRUCTS
427 unsigned long
evthreadimpl_get_id_()428 evthreadimpl_get_id_()
429 {
430 	return evthread_id_fn_ ? evthread_id_fn_() : 1;
431 }
432 void *
evthreadimpl_lock_alloc_(unsigned locktype)433 evthreadimpl_lock_alloc_(unsigned locktype)
434 {
435 #ifndef EVENT__DISABLE_DEBUG_MODE
436 	if (event_debug_mode_on_) {
437 		event_debug_created_threadable_ctx_ = 1;
438 	}
439 #endif
440 
441 	return evthread_lock_fns_.alloc ?
442 	    evthread_lock_fns_.alloc(locktype) : NULL;
443 }
444 void
evthreadimpl_lock_free_(void * lock,unsigned locktype)445 evthreadimpl_lock_free_(void *lock, unsigned locktype)
446 {
447 	if (evthread_lock_fns_.free)
448 		evthread_lock_fns_.free(lock, locktype);
449 }
450 int
evthreadimpl_lock_lock_(unsigned mode,void * lock)451 evthreadimpl_lock_lock_(unsigned mode, void *lock)
452 {
453 	if (evthread_lock_fns_.lock)
454 		return evthread_lock_fns_.lock(mode, lock);
455 	else
456 		return 0;
457 }
458 int
evthreadimpl_lock_unlock_(unsigned mode,void * lock)459 evthreadimpl_lock_unlock_(unsigned mode, void *lock)
460 {
461 	if (evthread_lock_fns_.unlock)
462 		return evthread_lock_fns_.unlock(mode, lock);
463 	else
464 		return 0;
465 }
466 void *
evthreadimpl_cond_alloc_(unsigned condtype)467 evthreadimpl_cond_alloc_(unsigned condtype)
468 {
469 #ifndef EVENT__DISABLE_DEBUG_MODE
470 	if (event_debug_mode_on_) {
471 		event_debug_created_threadable_ctx_ = 1;
472 	}
473 #endif
474 
475 	return evthread_cond_fns_.alloc_condition ?
476 	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
477 }
478 void
evthreadimpl_cond_free_(void * cond)479 evthreadimpl_cond_free_(void *cond)
480 {
481 	if (evthread_cond_fns_.free_condition)
482 		evthread_cond_fns_.free_condition(cond);
483 }
484 int
evthreadimpl_cond_signal_(void * cond,int broadcast)485 evthreadimpl_cond_signal_(void *cond, int broadcast)
486 {
487 	if (evthread_cond_fns_.signal_condition)
488 		return evthread_cond_fns_.signal_condition(cond, broadcast);
489 	else
490 		return 0;
491 }
492 int
evthreadimpl_cond_wait_(void * cond,void * lock,const struct timeval * tv)493 evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
494 {
495 	if (evthread_cond_fns_.wait_condition)
496 		return evthread_cond_fns_.wait_condition(cond, lock, tv);
497 	else
498 		return 0;
499 }
/* Report whether lock debugging is currently enabled. */
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}
505 
/* Report whether a locking implementation has been configured. */
int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
511 #endif
512 
513 #endif
514