xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/evthread.c (revision eabc0478de71e4e011a5b4e0392741e01d491794)
1 /*	$NetBSD: evthread.c,v 1.7 2024/08/18 20:47:21 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "event2/event-config.h"
30 #include "evconfig-private.h"
31 
32 #ifndef EVENT__DISABLE_THREAD_SUPPORT
33 
34 #include "event2/thread.h"
35 
36 #include <stdlib.h>
37 #include <string.h>
38 
39 #include "log-internal.h"
40 #include "mm-internal.h"
41 #include "util-internal.h"
42 #include "evthread-internal.h"
43 
44 #ifdef EVTHREAD_EXPOSE_STRUCTS
45 #define GLOBAL
46 #else
47 #define GLOBAL static
48 #endif
49 
#ifndef EVENT__DISABLE_DEBUG_MODE
/* Debug-mode state defined elsewhere in libevent; used below to refuse
 * evthread initialization after threadable structures already exist. */
extern int event_debug_created_threadable_ctx_;
extern int event_debug_mode_on_;
#endif

/* globals */
/* Nonzero once evthread_enable_lock_debugging() has installed the
 * debug_lock_* wrappers into evthread_lock_fns_. */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
/* The lock callbacks currently in effect (the debug wrappers when lock
 * debugging is enabled). */
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
/* Callback returning the calling thread's numeric id; NULL if unset. */
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
/* The condition-variable callbacks currently in effect. */
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
/* Saved copies of the user-installed callbacks while lock debugging is
 * enabled; the debug wrappers forward to these. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};
72 
73 void
74 evthread_set_id_callback(unsigned long (*id_fn)(void))
75 {
76 	evthread_id_fn_ = id_fn;
77 }
78 
79 struct evthread_lock_callbacks *evthread_get_lock_callbacks()
80 {
81 	return evthread_lock_debugging_enabled_
82 	    ? &original_lock_fns_ : &evthread_lock_fns_;
83 }
84 struct evthread_condition_callbacks *evthread_get_condition_callbacks()
85 {
86 	return evthread_lock_debugging_enabled_
87 	    ? &original_cond_fns_ : &evthread_cond_fns_;
88 }
89 void evthreadimpl_disable_lock_debugging_(void)
90 {
91 	evthread_lock_debugging_enabled_ = 0;
92 }
93 
94 int
95 evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
96 {
97 	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
98 
99 #ifndef EVENT__DISABLE_DEBUG_MODE
100 	if (event_debug_mode_on_) {
101 		if (event_debug_created_threadable_ctx_) {
102 		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
103 		}
104 	}
105 #endif
106 
107 	if (!cbs) {
108 		if (target->alloc)
109 			event_warnx("Trying to disable lock functions after "
110 			    "they have been set up will probaby not work.");
111 		memset(target, 0, sizeof(evthread_lock_fns_));
112 		return 0;
113 	}
114 	if (target->alloc) {
115 		/* Uh oh; we already had locking callbacks set up.*/
116 		if (target->lock_api_version == cbs->lock_api_version &&
117 			target->supported_locktypes == cbs->supported_locktypes &&
118 			target->alloc == cbs->alloc &&
119 			target->free == cbs->free &&
120 			target->lock == cbs->lock &&
121 			target->unlock == cbs->unlock) {
122 			/* no change -- allow this. */
123 			return 0;
124 		}
125 		event_warnx("Can't change lock callbacks once they have been "
126 		    "initialized.");
127 		return -1;
128 	}
129 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
130 		memcpy(target, cbs, sizeof(evthread_lock_fns_));
131 		return event_global_setup_locks_(1);
132 	} else {
133 		return -1;
134 	}
135 }
136 
137 int
138 evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
139 {
140 	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
141 
142 #ifndef EVENT__DISABLE_DEBUG_MODE
143 	if (event_debug_mode_on_) {
144 		if (event_debug_created_threadable_ctx_) {
145 		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
146 		}
147 	}
148 #endif
149 
150 	if (!cbs) {
151 		if (target->alloc_condition)
152 			event_warnx("Trying to disable condition functions "
153 			    "after they have been set up will probaby not "
154 			    "work.");
155 		memset(target, 0, sizeof(evthread_cond_fns_));
156 		return 0;
157 	}
158 	if (target->alloc_condition) {
159 		/* Uh oh; we already had condition callbacks set up.*/
160 		if (target->condition_api_version == cbs->condition_api_version &&
161 			target->alloc_condition == cbs->alloc_condition &&
162 			target->free_condition == cbs->free_condition &&
163 			target->signal_condition == cbs->signal_condition &&
164 			target->wait_condition == cbs->wait_condition) {
165 			/* no change -- allow this. */
166 			return 0;
167 		}
168 		event_warnx("Can't change condition callbacks once they "
169 		    "have been initialized.");
170 		return -1;
171 	}
172 	if (cbs->alloc_condition && cbs->free_condition &&
173 	    cbs->signal_condition && cbs->wait_condition) {
174 		memcpy(target, cbs, sizeof(evthread_cond_fns_));
175 	}
176 	if (evthread_lock_debugging_enabled_) {
177 		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
178 		evthread_cond_fns_.free_condition = cbs->free_condition;
179 		evthread_cond_fns_.signal_condition = cbs->signal_condition;
180 	}
181 	return 0;
182 }
183 
/* Magic value stored in debug_lock.signature so the debug wrappers can
 * detect bogus or already-freed locks being passed in. */
#define DEBUG_LOCK_SIG	0xdeb0b10c

/* Wrapper placed around a real lock while lock debugging is enabled. */
struct debug_lock {
	unsigned signature;	/* DEBUG_LOCK_SIG while the lock is valid */
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags from allocation */
	unsigned long held_by;	/* id of owning thread; 0 if unheld/unknown */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* recursion depth; 0 when unlocked */
	void *lock;		/* underlying real lock, or NULL if none */
};
195 
196 static void *
197 debug_lock_alloc(unsigned locktype)
198 {
199 	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
200 	if (!result)
201 		return NULL;
202 	if (original_lock_fns_.alloc) {
203 		if (!(result->lock = original_lock_fns_.alloc(
204 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
205 			mm_free(result);
206 			return NULL;
207 		}
208 	} else {
209 		result->lock = NULL;
210 	}
211 	result->signature = DEBUG_LOCK_SIG;
212 	result->locktype = locktype;
213 	result->count = 0;
214 	result->held_by = 0;
215 	return result;
216 }
217 
218 static void
219 debug_lock_free(void *lock_, unsigned locktype)
220 {
221 	struct debug_lock *lock = lock_;
222 	EVUTIL_ASSERT(lock->count == 0);
223 	EVUTIL_ASSERT(locktype == lock->locktype);
224 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
225 	if (original_lock_fns_.free) {
226 		original_lock_fns_.free(lock->lock,
227 		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
228 	}
229 	lock->lock = NULL;
230 	lock->count = -100;
231 	lock->signature = 0x12300fda;
232 	mm_free(lock);
233 }
234 
235 static void
236 evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
237 {
238 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
239 	++lock->count;
240 	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
241 		EVUTIL_ASSERT(lock->count == 1);
242 	if (evthread_id_fn_) {
243 		unsigned long me;
244 		me = evthread_id_fn_();
245 		if (lock->count > 1)
246 			EVUTIL_ASSERT(lock->held_by == me);
247 		lock->held_by = me;
248 	}
249 }
250 
251 static int
252 debug_lock_lock(unsigned mode, void *lock_)
253 {
254 	struct debug_lock *lock = lock_;
255 	int res = 0;
256 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
257 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
258 	else
259 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
260 	if (original_lock_fns_.lock)
261 		res = original_lock_fns_.lock(mode, lock->lock);
262 	if (!res) {
263 		evthread_debug_lock_mark_locked(mode, lock);
264 	}
265 	return res;
266 }
267 
268 static void
269 evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
270 {
271 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
272 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
273 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
274 	else
275 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
276 	if (evthread_id_fn_) {
277 		unsigned long me;
278 		me = evthread_id_fn_();
279 		EVUTIL_ASSERT(lock->held_by == me);
280 		if (lock->count == 1)
281 			lock->held_by = 0;
282 	}
283 	--lock->count;
284 	EVUTIL_ASSERT(lock->count >= 0);
285 }
286 
287 static int
288 debug_lock_unlock(unsigned mode, void *lock_)
289 {
290 	struct debug_lock *lock = lock_;
291 	int res = 0;
292 	evthread_debug_lock_mark_unlocked(mode, lock);
293 	if (original_lock_fns_.unlock)
294 		res = original_lock_fns_.unlock(mode, lock->lock);
295 	return res;
296 }
297 
298 static int
299 debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
300 {
301 	int r;
302 	struct debug_lock *lock = lock_;
303 	EVUTIL_ASSERT(lock);
304 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
305 	EVLOCK_ASSERT_LOCKED(lock_);
306 	evthread_debug_lock_mark_unlocked(0, lock);
307 	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
308 	evthread_debug_lock_mark_locked(0, lock);
309 	return r;
310 }
311 
/* Misspelled ("debuging") variant kept for backward compatibility with
 * callers of the old public name; forwards to the correct spelling. */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}
318 
319 void
320 evthread_enable_lock_debugging(void)
321 {
322 	struct evthread_lock_callbacks cbs = {
323 		EVTHREAD_LOCK_API_VERSION,
324 		EVTHREAD_LOCKTYPE_RECURSIVE,
325 		debug_lock_alloc,
326 		debug_lock_free,
327 		debug_lock_lock,
328 		debug_lock_unlock
329 	};
330 	if (evthread_lock_debugging_enabled_)
331 		return;
332 	memcpy(&original_lock_fns_, &evthread_lock_fns_,
333 	    sizeof(struct evthread_lock_callbacks));
334 	memcpy(&evthread_lock_fns_, &cbs,
335 	    sizeof(struct evthread_lock_callbacks));
336 
337 	memcpy(&original_cond_fns_, &evthread_cond_fns_,
338 	    sizeof(struct evthread_condition_callbacks));
339 	evthread_cond_fns_.wait_condition = debug_cond_wait;
340 	evthread_lock_debugging_enabled_ = 1;
341 
342 	/* XXX return value should get checked. */
343 	event_global_setup_locks_(0);
344 }
345 
346 int
347 evthread_is_debug_lock_held_(void *lock_)
348 {
349 	struct debug_lock *lock = lock_;
350 	if (! lock->count)
351 		return 0;
352 	if (evthread_id_fn_) {
353 		unsigned long me = evthread_id_fn_();
354 		if (lock->held_by != me)
355 			return 0;
356 	}
357 	return 1;
358 }
359 
360 void *
361 evthread_debug_get_real_lock_(void *lock_)
362 {
363 	struct debug_lock *lock = lock_;
364 	return lock->lock;
365 }
366 
367 void *
368 evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
369 {
370 	/* there are four cases here:
371 	   1) we're turning on debugging; locking is not on.
372 	   2) we're turning on debugging; locking is on.
373 	   3) we're turning on locking; debugging is not on.
374 	   4) we're turning on locking; debugging is on. */
375 
376 	if (!enable_locks && original_lock_fns_.alloc == NULL) {
377 		/* Case 1: allocate a debug lock. */
378 		EVUTIL_ASSERT(lock_ == NULL);
379 		return debug_lock_alloc(locktype);
380 	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
381 		/* Case 2: wrap the lock in a debug lock. */
382 		struct debug_lock *lock;
383 		EVUTIL_ASSERT(lock_ != NULL);
384 
385 		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
386 			/* We can't wrap it: We need a recursive lock */
387 			original_lock_fns_.free(lock_, locktype);
388 			return debug_lock_alloc(locktype);
389 		}
390 		lock = mm_malloc(sizeof(struct debug_lock));
391 		if (!lock) {
392 			original_lock_fns_.free(lock_, locktype);
393 			return NULL;
394 		}
395 		lock->lock = lock_;
396 		lock->locktype = locktype;
397 		lock->count = 0;
398 		lock->held_by = 0;
399 		return lock;
400 	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
401 		/* Case 3: allocate a regular lock */
402 		EVUTIL_ASSERT(lock_ == NULL);
403 		return evthread_lock_fns_.alloc(locktype);
404 	} else {
405 		/* Case 4: Fill in a debug lock with a real lock */
406 		struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
407 		EVUTIL_ASSERT(enable_locks &&
408 		              evthread_lock_debugging_enabled_);
409 		EVUTIL_ASSERT(lock->locktype == locktype);
410 		if (!lock->lock) {
411 			lock->lock = original_lock_fns_.alloc(
412 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
413 			if (!lock->lock) {
414 				lock->count = -200;
415 				mm_free(lock);
416 				return NULL;
417 			}
418 		}
419 		return lock;
420 	}
421 }
422 
423 
424 #ifndef EVTHREAD_EXPOSE_STRUCTS
425 unsigned long
426 evthreadimpl_get_id_()
427 {
428 	return evthread_id_fn_ ? evthread_id_fn_() : 1;
429 }
430 void *
431 evthreadimpl_lock_alloc_(unsigned locktype)
432 {
433 #ifndef EVENT__DISABLE_DEBUG_MODE
434 	if (event_debug_mode_on_) {
435 		event_debug_created_threadable_ctx_ = 1;
436 	}
437 #endif
438 
439 	return evthread_lock_fns_.alloc ?
440 	    evthread_lock_fns_.alloc(locktype) : NULL;
441 }
442 void
443 evthreadimpl_lock_free_(void *lock, unsigned locktype)
444 {
445 	if (evthread_lock_fns_.free)
446 		evthread_lock_fns_.free(lock, locktype);
447 }
448 int
449 evthreadimpl_lock_lock_(unsigned mode, void *lock)
450 {
451 	if (evthread_lock_fns_.lock)
452 		return evthread_lock_fns_.lock(mode, lock);
453 	else
454 		return 0;
455 }
456 int
457 evthreadimpl_lock_unlock_(unsigned mode, void *lock)
458 {
459 	if (evthread_lock_fns_.unlock)
460 		return evthread_lock_fns_.unlock(mode, lock);
461 	else
462 		return 0;
463 }
464 void *
465 evthreadimpl_cond_alloc_(unsigned condtype)
466 {
467 #ifndef EVENT__DISABLE_DEBUG_MODE
468 	if (event_debug_mode_on_) {
469 		event_debug_created_threadable_ctx_ = 1;
470 	}
471 #endif
472 
473 	return evthread_cond_fns_.alloc_condition ?
474 	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
475 }
476 void
477 evthreadimpl_cond_free_(void *cond)
478 {
479 	if (evthread_cond_fns_.free_condition)
480 		evthread_cond_fns_.free_condition(cond);
481 }
482 int
483 evthreadimpl_cond_signal_(void *cond, int broadcast)
484 {
485 	if (evthread_cond_fns_.signal_condition)
486 		return evthread_cond_fns_.signal_condition(cond, broadcast);
487 	else
488 		return 0;
489 }
490 int
491 evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
492 {
493 	if (evthread_cond_fns_.wait_condition)
494 		return evthread_cond_fns_.wait_condition(cond, lock, tv);
495 	else
496 		return 0;
497 }
498 int
499 evthreadimpl_is_lock_debugging_enabled_(void)
500 {
501 	return evthread_lock_debugging_enabled_;
502 }
503 
504 int
505 evthreadimpl_locking_enabled_(void)
506 {
507 	return evthread_lock_fns_.lock != NULL;
508 }
509 #endif
510 
511 #endif
512