xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/evthread.c (revision 7476e6e4d2caace010b86653ab2a1cb4eb355cdf)
1 /*	$NetBSD: evthread.c,v 1.3 2015/04/07 17:34:20 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "event2/event-config.h"
30 #include "evconfig-private.h"
31 
32 #ifndef EVENT__DISABLE_THREAD_SUPPORT
33 
34 #include "event2/thread.h"
35 
36 #include <stdlib.h>
37 #include <string.h>
38 
39 #include "log-internal.h"
40 #include "mm-internal.h"
41 #include "util-internal.h"
42 #include "evthread-internal.h"
43 
44 #ifdef EVTHREAD_EXPOSE_STRUCTS
45 #define GLOBAL
46 #else
47 #define GLOBAL static
48 #endif
49 
/* globals */

/* Nonzero iff the debug-lock wrappers (see evthread_enable_lock_debugging())
 * are currently installed in place of the real lock callbacks. */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
/* The lock callbacks currently in effect; all-zero until the application
 * installs some via evthread_set_lock_callbacks(). */
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
/* Callback returning the calling thread's id; NULL until set via
 * evthread_set_id_callback(). */
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
/* The condition-variable callbacks currently in effect. */
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
/* While lock debugging is enabled, these hold the real (pre-debug)
 * callbacks so the wrappers can forward to them. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};
67 
/* Install the callback used to identify the calling thread; the debug-lock
 * code uses it to track which thread holds each lock (held_by). */
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}
73 
74 struct evthread_lock_callbacks *evthread_get_lock_callbacks()
75 {
76 	return evthread_lock_debugging_enabled_
77 	    ? &original_lock_fns_ : &evthread_lock_fns_;
78 }
79 struct evthread_condition_callbacks *evthread_get_condition_callbacks()
80 {
81 	return evthread_lock_debugging_enabled_
82 	    ? &original_cond_fns_ : &evthread_cond_fns_;
83 }
/* Internal: clear the lock-debugging flag.  Only the flag is touched;
 * whatever callbacks are installed stay installed. */
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}
88 
89 int
90 evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
91 {
92 	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
93 
94 	if (!cbs) {
95 		if (target->alloc)
96 			event_warnx("Trying to disable lock functions after "
97 			    "they have been set up will probaby not work.");
98 		memset(target, 0, sizeof(evthread_lock_fns_));
99 		return 0;
100 	}
101 	if (target->alloc) {
102 		/* Uh oh; we already had locking callbacks set up.*/
103 		if (target->lock_api_version == cbs->lock_api_version &&
104 			target->supported_locktypes == cbs->supported_locktypes &&
105 			target->alloc == cbs->alloc &&
106 			target->free == cbs->free &&
107 			target->lock == cbs->lock &&
108 			target->unlock == cbs->unlock) {
109 			/* no change -- allow this. */
110 			return 0;
111 		}
112 		event_warnx("Can't change lock callbacks once they have been "
113 		    "initialized.");
114 		return -1;
115 	}
116 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
117 		memcpy(target, cbs, sizeof(evthread_lock_fns_));
118 		return event_global_setup_locks_(1);
119 	} else {
120 		return -1;
121 	}
122 }
123 
/*
 * Install (or, with cbs == NULL, clear) the application's condition-variable
 * callbacks.  Mirrors evthread_set_lock_callbacks(): once callbacks are
 * installed, only an identical re-installation is accepted.  Returns 0 on
 * success, -1 if different callbacks were already set.
 */
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	/* While lock debugging is on, "target" is the saved original table,
	 * not the live evthread_cond_fns_. */
	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probaby not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up.*/
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	/* Copy only a complete table; a partial one is silently ignored
	 * (note: still returns 0 in that case -- upstream behavior). */
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		/* The live table must keep debug_cond_wait as its
		 * wait_condition; forward only the other three so waits
		 * still go through the debug-lock bookkeeping. */
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}
162 
#define DEBUG_LOCK_SIG	0xdeb0b10c

/* Bookkeeping wrapper placed around every lock while lock debugging is
 * enabled. */
struct debug_lock {
	unsigned signature;	/* always DEBUG_LOCK_SIG while the lock is live */
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags given at allocation */
	unsigned long held_by;	/* id of the owning thread; 0 when unheld */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* recursion depth; 0 when unheld */
	void *lock;		/* underlying real lock, or NULL if none installed */
};
174 
175 static void *
176 debug_lock_alloc(unsigned locktype)
177 {
178 	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
179 	if (!result)
180 		return NULL;
181 	if (original_lock_fns_.alloc) {
182 		if (!(result->lock = original_lock_fns_.alloc(
183 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
184 			mm_free(result);
185 			return NULL;
186 		}
187 	} else {
188 		result->lock = NULL;
189 	}
190 	result->signature = DEBUG_LOCK_SIG;
191 	result->locktype = locktype;
192 	result->count = 0;
193 	result->held_by = 0;
194 	return result;
195 }
196 
197 static void
198 debug_lock_free(void *lock_, unsigned locktype)
199 {
200 	struct debug_lock *lock = lock_;
201 	EVUTIL_ASSERT(lock->count == 0);
202 	EVUTIL_ASSERT(locktype == lock->locktype);
203 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
204 	if (original_lock_fns_.free) {
205 		original_lock_fns_.free(lock->lock,
206 		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
207 	}
208 	lock->lock = NULL;
209 	lock->count = -100;
210 	lock->signature = 0x12300fda;
211 	mm_free(lock);
212 }
213 
214 static void
215 evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
216 {
217 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
218 	++lock->count;
219 	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
220 		EVUTIL_ASSERT(lock->count == 1);
221 	if (evthread_id_fn_) {
222 		unsigned long me;
223 		me = evthread_id_fn_();
224 		if (lock->count > 1)
225 			EVUTIL_ASSERT(lock->held_by == me);
226 		lock->held_by = me;
227 	}
228 }
229 
230 static int
231 debug_lock_lock(unsigned mode, void *lock_)
232 {
233 	struct debug_lock *lock = lock_;
234 	int res = 0;
235 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
236 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
237 	else
238 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
239 	if (original_lock_fns_.lock)
240 		res = original_lock_fns_.lock(mode, lock->lock);
241 	if (!res) {
242 		evthread_debug_lock_mark_locked(mode, lock);
243 	}
244 	return res;
245 }
246 
247 static void
248 evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
249 {
250 	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
251 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
252 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
253 	else
254 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
255 	if (evthread_id_fn_) {
256 		unsigned long me;
257 		me = evthread_id_fn_();
258 		EVUTIL_ASSERT(lock->held_by == me);
259 		if (lock->count == 1)
260 			lock->held_by = 0;
261 	}
262 	--lock->count;
263 	EVUTIL_ASSERT(lock->count >= 0);
264 }
265 
266 static int
267 debug_lock_unlock(unsigned mode, void *lock_)
268 {
269 	struct debug_lock *lock = lock_;
270 	int res = 0;
271 	evthread_debug_lock_mark_unlocked(mode, lock);
272 	if (original_lock_fns_.unlock)
273 		res = original_lock_fns_.unlock(mode, lock->lock);
274 	return res;
275 }
276 
/* Condition-wait wrapper used while lock debugging is enabled: the caller
 * holds a debug lock, but the real wait_condition needs the underlying
 * lock, so debug bookkeeping is updated around the real wait. */
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	/* The wait releases the lock; mark it unlocked first so held_by and
	 * count stay consistent while we are blocked. */
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	/* The lock is re-acquired before the wait returns. */
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}
290 
/* misspelled version for backward compatibility */
/* Old public name ("debuging"); kept so existing callers keep linking.
 * Simply forwards to the correctly spelled function. */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}
297 
/* Turn on lock debugging: save the current lock/condition callbacks into
 * original_*_fns_ and install the debug wrappers in their place.
 * Idempotent; a second call is a no-op. */
void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	/* Enabling twice would overwrite original_lock_fns_ with the debug
	 * wrappers themselves; bail out early. */
	if (evthread_lock_debugging_enabled_)
		return;
	/* Save the real lock callbacks, then install the wrappers. */
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	/* Conditions keep the real alloc/free/signal callbacks; only waiting
	 * must go through the wrapper so debug-lock bookkeeping stays
	 * correct. */
	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}
324 
325 int
326 evthread_is_debug_lock_held_(void *lock_)
327 {
328 	struct debug_lock *lock = lock_;
329 	if (! lock->count)
330 		return 0;
331 	if (evthread_id_fn_) {
332 		unsigned long me = evthread_id_fn_();
333 		if (lock->held_by != me)
334 			return 0;
335 	}
336 	return 1;
337 }
338 
/* Given a debug lock, return the underlying real lock (NULL if no real
 * lock implementation was installed when the debug lock was created). */
void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}
345 
346 void *
347 evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
348 {
349 	/* there are four cases here:
350 	   1) we're turning on debugging; locking is not on.
351 	   2) we're turning on debugging; locking is on.
352 	   3) we're turning on locking; debugging is not on.
353 	   4) we're turning on locking; debugging is on. */
354 
355 	if (!enable_locks && original_lock_fns_.alloc == NULL) {
356 		/* Case 1: allocate a debug lock. */
357 		EVUTIL_ASSERT(lock_ == NULL);
358 		return debug_lock_alloc(locktype);
359 	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
360 		/* Case 2: wrap the lock in a debug lock. */
361 		struct debug_lock *lock;
362 		EVUTIL_ASSERT(lock_ != NULL);
363 
364 		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
365 			/* We can't wrap it: We need a recursive lock */
366 			original_lock_fns_.free(lock_, locktype);
367 			return debug_lock_alloc(locktype);
368 		}
369 		lock = mm_malloc(sizeof(struct debug_lock));
370 		if (!lock) {
371 			original_lock_fns_.free(lock_, locktype);
372 			return NULL;
373 		}
374 		lock->lock = lock_;
375 		lock->locktype = locktype;
376 		lock->count = 0;
377 		lock->held_by = 0;
378 		return lock;
379 	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
380 		/* Case 3: allocate a regular lock */
381 		EVUTIL_ASSERT(lock_ == NULL);
382 		return evthread_lock_fns_.alloc(locktype);
383 	} else {
384 		/* Case 4: Fill in a debug lock with a real lock */
385 		struct debug_lock *lock = lock_;
386 		EVUTIL_ASSERT(enable_locks &&
387 		              evthread_lock_debugging_enabled_);
388 		EVUTIL_ASSERT(lock->locktype == locktype);
389 		EVUTIL_ASSERT(lock->lock == NULL);
390 		lock->lock = original_lock_fns_.alloc(
391 			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
392 		if (!lock->lock) {
393 			lock->count = -200;
394 			mm_free(lock);
395 			return NULL;
396 		}
397 		return lock;
398 	}
399 }
400 
401 
402 #ifndef EVTHREAD_EXPOSE_STRUCTS
403 unsigned long
404 evthreadimpl_get_id_()
405 {
406 	return evthread_id_fn_ ? evthread_id_fn_() : 1;
407 }
408 void *
409 evthreadimpl_lock_alloc_(unsigned locktype)
410 {
411 	return evthread_lock_fns_.alloc ?
412 	    evthread_lock_fns_.alloc(locktype) : NULL;
413 }
414 void
415 evthreadimpl_lock_free_(void *lock, unsigned locktype)
416 {
417 	if (evthread_lock_fns_.free)
418 		evthread_lock_fns_.free(lock, locktype);
419 }
420 int
421 evthreadimpl_lock_lock_(unsigned mode, void *lock)
422 {
423 	if (evthread_lock_fns_.lock)
424 		return evthread_lock_fns_.lock(mode, lock);
425 	else
426 		return 0;
427 }
428 int
429 evthreadimpl_lock_unlock_(unsigned mode, void *lock)
430 {
431 	if (evthread_lock_fns_.unlock)
432 		return evthread_lock_fns_.unlock(mode, lock);
433 	else
434 		return 0;
435 }
436 void *
437 evthreadimpl_cond_alloc_(unsigned condtype)
438 {
439 	return evthread_cond_fns_.alloc_condition ?
440 	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
441 }
442 void
443 evthreadimpl_cond_free_(void *cond)
444 {
445 	if (evthread_cond_fns_.free_condition)
446 		evthread_cond_fns_.free_condition(cond);
447 }
448 int
449 evthreadimpl_cond_signal_(void *cond, int broadcast)
450 {
451 	if (evthread_cond_fns_.signal_condition)
452 		return evthread_cond_fns_.signal_condition(cond, broadcast);
453 	else
454 		return 0;
455 }
456 int
457 evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
458 {
459 	if (evthread_cond_fns_.wait_condition)
460 		return evthread_cond_fns_.wait_condition(cond, lock, tv);
461 	else
462 		return 0;
463 }
/* True iff evthread_enable_lock_debugging() has installed the debug
 * wrappers. */
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}
469 
470 int
471 evthreadimpl_locking_enabled_(void)
472 {
473 	return evthread_lock_fns_.lock != NULL;
474 }
475 #endif
476 
477 #endif
478