xref: /netbsd-src/external/bsd/libevent/dist/evthread.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: evthread.c,v 1.2 2017/03/02 15:43:14 christos Exp $	*/
2 /*
3  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "event2/event-config.h"
29 #include <sys/cdefs.h>
30 __RCSID("$NetBSD: evthread.c,v 1.2 2017/03/02 15:43:14 christos Exp $");
31 #include "evconfig-private.h"
32 
33 #ifndef EVENT__DISABLE_THREAD_SUPPORT
34 
35 #include "event2/thread.h"
36 
37 #include <stdlib.h>
38 #include <string.h>
39 
40 #include "log-internal.h"
41 #include "mm-internal.h"
42 #include "util-internal.h"
43 #include "evthread-internal.h"
44 
45 #ifdef EVTHREAD_EXPOSE_STRUCTS
46 #define GLOBAL
47 #else
48 #define GLOBAL static
49 #endif
50 
51 #ifndef EVENT__DISABLE_DEBUG_MODE
52 extern int event_debug_created_threadable_ctx_;
53 extern int event_debug_mode_on_;
54 #endif
55 
/* globals */
/* Nonzero while the debug-lock wrappers are installed; flips the
 * meaning of the evthread_get_*_callbacks() accessors below. */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
/* Active lock callbacks; all-zero until evthread_set_lock_callbacks(). */
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
/* Returns the calling thread's numeric id; NULL until
 * evthread_set_id_callback() installs one. */
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
/* Active condition-variable callbacks; all-zero until
 * evthread_set_condition_callbacks(). */
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
/* Real callbacks saved aside when evthread_enable_lock_debugging()
 * installs the debug wrappers; the wrappers forward to these. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};
73 
/* Install the callback used to obtain the current thread's id.
 * Passing NULL disables thread-id tracking (debug asserts that
 * depend on evthread_id_fn_ are then skipped). */
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}
79 
80 struct evthread_lock_callbacks *evthread_get_lock_callbacks()
81 {
82 	return evthread_lock_debugging_enabled_
83 	    ? &original_lock_fns_ : &evthread_lock_fns_;
84 }
85 struct evthread_condition_callbacks *evthread_get_condition_callbacks()
86 {
87 	return evthread_lock_debugging_enabled_
88 	    ? &original_cond_fns_ : &evthread_cond_fns_;
89 }
/* Internal: clear the lock-debugging flag.  Note this only resets the
 * flag; it does NOT copy original_lock_fns_/original_cond_fns_ back
 * into the active tables. */
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}
94 
95 int
96 evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
97 {
98 	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
99 
100 #ifndef EVENT__DISABLE_DEBUG_MODE
101 	if (event_debug_mode_on_) {
102 		if (event_debug_created_threadable_ctx_) {
103 		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
104 		}
105 	}
106 #endif
107 
108 	if (!cbs) {
109 		if (target->alloc)
110 			event_warnx("Trying to disable lock functions after "
111 			    "they have been set up will probably not work.");
112 		memset(target, 0, sizeof(evthread_lock_fns_));
113 		return 0;
114 	}
115 	if (target->alloc) {
116 		/* Uh oh; we already had locking callbacks set up.*/
117 		if (target->lock_api_version == cbs->lock_api_version &&
118 			target->supported_locktypes == cbs->supported_locktypes &&
119 			target->alloc == cbs->alloc &&
120 			target->free == cbs->free &&
121 			target->lock == cbs->lock &&
122 			target->unlock == cbs->unlock) {
123 			/* no change -- allow this. */
124 			return 0;
125 		}
126 		event_warnx("Can't change lock callbacks once they have been "
127 		    "initialized.");
128 		return -1;
129 	}
130 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
131 		memcpy(target, cbs, sizeof(evthread_lock_fns_));
132 		return event_global_setup_locks_(1);
133 	} else {
134 		return -1;
135 	}
136 }
137 
138 int
139 evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
140 {
141 	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
142 
143 #ifndef EVENT__DISABLE_DEBUG_MODE
144 	if (event_debug_mode_on_) {
145 		if (event_debug_created_threadable_ctx_) {
146 		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
147 		}
148 	}
149 #endif
150 
151 	if (!cbs) {
152 		if (target->alloc_condition)
153 			event_warnx("Trying to disable condition functions "
154 			    "after they have been set up will probably not "
155 			    "work.");
156 		memset(target, 0, sizeof(evthread_cond_fns_));
157 		return 0;
158 	}
159 	if (target->alloc_condition) {
160 		/* Uh oh; we already had condition callbacks set up.*/
161 		if (target->condition_api_version == cbs->condition_api_version &&
162 			target->alloc_condition == cbs->alloc_condition &&
163 			target->free_condition == cbs->free_condition &&
164 			target->signal_condition == cbs->signal_condition &&
165 			target->wait_condition == cbs->wait_condition) {
166 			/* no change -- allow this. */
167 			return 0;
168 		}
169 		event_warnx("Can't change condition callbacks once they "
170 		    "have been initialized.");
171 		return -1;
172 	}
173 	if (cbs->alloc_condition && cbs->free_condition &&
174 	    cbs->signal_condition && cbs->wait_condition) {
175 		memcpy(target, cbs, sizeof(evthread_cond_fns_));
176 	}
177 	if (evthread_lock_debugging_enabled_) {
178 		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
179 		evthread_cond_fns_.free_condition = cbs->free_condition;
180 		evthread_cond_fns_.signal_condition = cbs->signal_condition;
181 	}
182 	return 0;
183 }
184 
/* Magic value stored in every live debug_lock so the wrappers can
 * detect use of a bogus or already-freed lock. */
#define DEBUG_LOCK_SIG	0xdeb0b10c

/* Wrapper placed around a real lock while lock debugging is enabled. */
struct debug_lock {
	unsigned signature;	/* DEBUG_LOCK_SIG while the lock is valid */
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags from allocation */
	unsigned long held_by;	/* owner thread id, 0 when unheld; only
				 * maintained if evthread_id_fn_ is set */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* recursion depth; 0 means unlocked */
	void *lock;		/* underlying real lock, or NULL if none */
};
196 
197 static void *
198 debug_lock_alloc(unsigned locktype)
199 {
200 	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
201 	if (!result)
202 		return NULL;
203 	if (original_lock_fns_.alloc) {
204 		if (!(result->lock = original_lock_fns_.alloc(
205 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
206 			mm_free(result);
207 			return NULL;
208 		}
209 	} else {
210 		result->lock = NULL;
211 	}
212 	result->signature = DEBUG_LOCK_SIG;
213 	result->locktype = locktype;
214 	result->count = 0;
215 	result->held_by = 0;
216 	return result;
217 }
218 
219 static void
220 debug_lock_free(void *lock_, unsigned locktype)
221 {
222 	struct debug_lock *lock = lock_;
223 	EVUTIL_ASSERT(lock->count == 0);
224 	EVUTIL_ASSERT(locktype == lock->locktype);
225 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
226 	if (original_lock_fns_.free) {
227 		original_lock_fns_.free(lock->lock,
228 		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
229 	}
230 	lock->lock = NULL;
231 	lock->count = -100;
232 	lock->signature = 0x12300fda;
233 	mm_free(lock);
234 }
235 
236 static void
237 evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
238 {
239 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
240 	++lock->count;
241 	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
242 		EVUTIL_ASSERT(lock->count == 1);
243 	if (evthread_id_fn_) {
244 		unsigned long me;
245 		me = evthread_id_fn_();
246 		if (lock->count > 1)
247 			EVUTIL_ASSERT(lock->held_by == me);
248 		lock->held_by = me;
249 	}
250 }
251 
252 static int
253 debug_lock_lock(unsigned mode, void *lock_)
254 {
255 	struct debug_lock *lock = lock_;
256 	int res = 0;
257 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
258 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
259 	else
260 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
261 	if (original_lock_fns_.lock)
262 		res = original_lock_fns_.lock(mode, lock->lock);
263 	if (!res) {
264 		evthread_debug_lock_mark_locked(mode, lock);
265 	}
266 	return res;
267 }
268 
269 static void
270 evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
271 {
272 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
273 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
274 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
275 	else
276 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
277 	if (evthread_id_fn_) {
278 		unsigned long me;
279 		me = evthread_id_fn_();
280 		EVUTIL_ASSERT(lock->held_by == me);
281 		if (lock->count == 1)
282 			lock->held_by = 0;
283 	}
284 	--lock->count;
285 	EVUTIL_ASSERT(lock->count >= 0);
286 }
287 
288 static int
289 debug_lock_unlock(unsigned mode, void *lock_)
290 {
291 	struct debug_lock *lock = lock_;
292 	int res = 0;
293 	evthread_debug_lock_mark_unlocked(mode, lock);
294 	if (original_lock_fns_.unlock)
295 		res = original_lock_fns_.unlock(mode, lock->lock);
296 	return res;
297 }
298 
/* Debug wrapper for wait_condition.  The real wait releases and
 * reacquires the underlying lock, so the debug bookkeeping must be
 * cleared before the wait and restored afterwards.  Asserts the caller
 * actually holds the (valid) debug lock. */
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	/* Mark unlocked first: the wait releases lock->lock... */
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	/* ...and re-mark held once it returns with the lock reacquired. */
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}
312 
/* misspelled version for backward compatibility: older releases
 * exported this name, so it must remain.  Forwards to the correctly
 * spelled function. */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}
319 
320 void
321 evthread_enable_lock_debugging(void)
322 {
323 	struct evthread_lock_callbacks cbs = {
324 		EVTHREAD_LOCK_API_VERSION,
325 		EVTHREAD_LOCKTYPE_RECURSIVE,
326 		debug_lock_alloc,
327 		debug_lock_free,
328 		debug_lock_lock,
329 		debug_lock_unlock
330 	};
331 	if (evthread_lock_debugging_enabled_)
332 		return;
333 	memcpy(&original_lock_fns_, &evthread_lock_fns_,
334 	    sizeof(struct evthread_lock_callbacks));
335 	memcpy(&evthread_lock_fns_, &cbs,
336 	    sizeof(struct evthread_lock_callbacks));
337 
338 	memcpy(&original_cond_fns_, &evthread_cond_fns_,
339 	    sizeof(struct evthread_condition_callbacks));
340 	evthread_cond_fns_.wait_condition = debug_cond_wait;
341 	evthread_lock_debugging_enabled_ = 1;
342 
343 	/* XXX return value should get checked. */
344 	event_global_setup_locks_(0);
345 }
346 
347 int
348 evthread_is_debug_lock_held_(void *lock_)
349 {
350 	struct debug_lock *lock = lock_;
351 	if (! lock->count)
352 		return 0;
353 	if (evthread_id_fn_) {
354 		unsigned long me = evthread_id_fn_();
355 		if (lock->held_by != me)
356 			return 0;
357 	}
358 	return 1;
359 }
360 
/* Internal: given a debug_lock wrapper, return the real lock it
 * wraps (NULL if no real locking callbacks were installed). */
void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}
367 
368 void *
369 evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
370 {
371 	/* there are four cases here:
372 	   1) we're turning on debugging; locking is not on.
373 	   2) we're turning on debugging; locking is on.
374 	   3) we're turning on locking; debugging is not on.
375 	   4) we're turning on locking; debugging is on. */
376 
377 	if (!enable_locks && original_lock_fns_.alloc == NULL) {
378 		/* Case 1: allocate a debug lock. */
379 		EVUTIL_ASSERT(lock_ == NULL);
380 		return debug_lock_alloc(locktype);
381 	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
382 		/* Case 2: wrap the lock in a debug lock. */
383 		struct debug_lock *lock;
384 		EVUTIL_ASSERT(lock_ != NULL);
385 
386 		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
387 			/* We can't wrap it: We need a recursive lock */
388 			original_lock_fns_.free(lock_, locktype);
389 			return debug_lock_alloc(locktype);
390 		}
391 		lock = mm_malloc(sizeof(struct debug_lock));
392 		if (!lock) {
393 			original_lock_fns_.free(lock_, locktype);
394 			return NULL;
395 		}
396 		lock->lock = lock_;
397 		lock->locktype = locktype;
398 		lock->count = 0;
399 		lock->held_by = 0;
400 		return lock;
401 	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
402 		/* Case 3: allocate a regular lock */
403 		EVUTIL_ASSERT(lock_ == NULL);
404 		return evthread_lock_fns_.alloc(locktype);
405 	} else {
406 		/* Case 4: Fill in a debug lock with a real lock */
407 		struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
408 		EVUTIL_ASSERT(enable_locks &&
409 		              evthread_lock_debugging_enabled_);
410 		EVUTIL_ASSERT(lock->locktype == locktype);
411 		if (!lock->lock) {
412 			lock->lock = original_lock_fns_.alloc(
413 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
414 			if (!lock->lock) {
415 				lock->count = -200;
416 				mm_free(lock);
417 				return NULL;
418 			}
419 		}
420 		return lock;
421 	}
422 }
423 
424 
425 #ifndef EVTHREAD_EXPOSE_STRUCTS
426 unsigned long
427 evthreadimpl_get_id_()
428 {
429 	return evthread_id_fn_ ? evthread_id_fn_() : 1;
430 }
431 void *
432 evthreadimpl_lock_alloc_(unsigned locktype)
433 {
434 #ifndef EVENT__DISABLE_DEBUG_MODE
435 	if (event_debug_mode_on_) {
436 		event_debug_created_threadable_ctx_ = 1;
437 	}
438 #endif
439 
440 	return evthread_lock_fns_.alloc ?
441 	    evthread_lock_fns_.alloc(locktype) : NULL;
442 }
/* Free a lock via the installed callbacks; no-op if none are set. */
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
449 int
450 evthreadimpl_lock_lock_(unsigned mode, void *lock)
451 {
452 	if (evthread_lock_fns_.lock)
453 		return evthread_lock_fns_.lock(mode, lock);
454 	else
455 		return 0;
456 }
457 int
458 evthreadimpl_lock_unlock_(unsigned mode, void *lock)
459 {
460 	if (evthread_lock_fns_.unlock)
461 		return evthread_lock_fns_.unlock(mode, lock);
462 	else
463 		return 0;
464 }
465 void *
466 evthreadimpl_cond_alloc_(unsigned condtype)
467 {
468 #ifndef EVENT__DISABLE_DEBUG_MODE
469 	if (event_debug_mode_on_) {
470 		event_debug_created_threadable_ctx_ = 1;
471 	}
472 #endif
473 
474 	return evthread_cond_fns_.alloc_condition ?
475 	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
476 }
/* Free a condition variable via the installed callbacks; no-op if
 * none are set. */
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
483 int
484 evthreadimpl_cond_signal_(void *cond, int broadcast)
485 {
486 	if (evthread_cond_fns_.signal_condition)
487 		return evthread_cond_fns_.signal_condition(cond, broadcast);
488 	else
489 		return 0;
490 }
491 int
492 evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
493 {
494 	if (evthread_cond_fns_.wait_condition)
495 		return evthread_cond_fns_.wait_condition(cond, lock, tv);
496 	else
497 		return 0;
498 }
/* Return nonzero iff the debug-lock wrappers are currently active. */
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}
504 
/* Return nonzero iff locking callbacks have been configured. */
int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
510 #endif
511 
512 #endif
513