/* $NetBSD: evthread.c,v 1.1.1.1 2013/12/27 23:31:18 christos Exp $ */

/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifndef EVENT__DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

/* globals */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}
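
/*
 * Illustrative sketch, not part of the original file: with POSIX threads,
 * the id callback is usually derived from pthread_self(), roughly as below.
 * The function name is hypothetical; libevent's own pthreads glue
 * (installed by evthread_use_pthreads(), declared in event2/thread.h)
 * provides an equivalent callback for you.
 *
 *	#include <pthread.h>
 *
 *	static unsigned long
 *	example_id_callback(void)
 *	{
 *		return (unsigned long)pthread_self();
 *	}
 *
 *	evthread_set_id_callback(example_id_callback);
 *
 * Only equality of ids matters to the debug-lock checks below, so a lossy
 * conversion of pthread_t is acceptable for this sketch on platforms where
 * pthread_t is an integer or pointer type.
 */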

int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target =
	    evthread_lock_debugging_enabled_
	    ? &original_lock_fns_ : &evthread_lock_fns_;

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up. */
		if (target->lock_api_version == cbs->lock_api_version &&
		    target->supported_locktypes == cbs->supported_locktypes &&
		    target->alloc == cbs->alloc &&
		    target->free == cbs->free &&
		    target->lock == cbs->lock &&
		    target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}

int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target =
	    evthread_lock_debugging_enabled_
	    ? &original_cond_fns_ : &evthread_cond_fns_;

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up. */
		if (target->condition_api_version == cbs->condition_api_version &&
		    target->alloc_condition == cbs->alloc_condition &&
		    target->free_condition == cbs->free_condition &&
		    target->signal_condition == cbs->signal_condition &&
		    target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}
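
/*
 * Illustrative usage sketch, not part of the original file: most programs
 * never call the two setters above directly; they call
 * evthread_use_pthreads() or evthread_use_windows_threads() from
 * event2/thread.h, which install a complete, matching set of callbacks.
 * A hand-rolled setup would look roughly like this, where my_lock_alloc()
 * and the other my_* functions are hypothetical application-supplied
 * implementations:
 *
 *	struct evthread_lock_callbacks lock_cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		my_lock_alloc, my_lock_free,
 *		my_lock_lock, my_lock_unlock
 *	};
 *	struct evthread_condition_callbacks cond_cbs = {
 *		EVTHREAD_CONDITION_API_VERSION,
 *		my_cond_alloc, my_cond_free,
 *		my_cond_signal, my_cond_wait
 *	};
 *
 *	if (evthread_set_lock_callbacks(&lock_cbs) < 0 ||
 *	    evthread_set_condition_callbacks(&cond_cbs) < 0)
 *		abort();
 *
 * A -1 return means a different, incompatible set of callbacks had already
 * been installed.  evthread_set_lock_callbacks() also returns -1 if any of
 * the four lock function pointers is NULL.
 */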

#define DEBUG_LOCK_SIG	0xdeb0b10c

struct debug_lock {
	unsigned signature;
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (original_lock_fns_.alloc) {
		if (!(result->lock = original_lock_fns_.alloc(
			    locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->signature = DEBUG_LOCK_SIG;
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}

static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (original_lock_fns_.unlock)
		res = original_lock_fns_.unlock(mode, lock->lock);
	return res;
}

static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

/* misspelled version for backward compatibility */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}

void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}
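
/*
 * Illustrative usage sketch, not part of the original file: lock debugging
 * should be switched on after the real threading callbacks are installed
 * and before any event_base (or other lock user) is created, since locks
 * are only wrapped at allocation time:
 *
 *	evthread_use_pthreads();
 *	evthread_enable_lock_debugging();
 *	struct event_base *base = event_base_new();
 *
 * With the debug wrappers in place, common misuses trip EVUTIL_ASSERT
 * instead of silently corrupting state: unlocking a lock the calling thread
 * does not hold, locking a non-recursive lock twice from the same thread,
 * freeing a lock that is still held, and waiting on a condition without
 * holding the associated lock (see EVLOCK_ASSERT_LOCKED in debug_cond_wait()
 * above).
 */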

int
evthread_is_debug_lock_held_(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (evthread_id_fn_) {
		unsigned long me = evthread_id_fn_();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}
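
/*
 * Minimal sketch, not part of the original file, of how an ownership
 * assertion can be built on evthread_is_debug_lock_held_(); libevent's
 * internal EVLOCK_ASSERT_LOCKED macro (evthread-internal.h), used by
 * debug_cond_wait() above, works along these lines.  EXAMPLE_ASSERT_LOCKED
 * is a hypothetical name:
 *
 *	#define EXAMPLE_ASSERT_LOCKED(lock)				\
 *		do {							\
 *			if ((lock) && evthread_lock_debugging_enabled_)	\
 *				EVUTIL_ASSERT(				\
 *				    evthread_is_debug_lock_held_(lock)); \
 *		} while (0)
 */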

void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && original_lock_fns_.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			original_lock_fns_.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			original_lock_fns_.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return evthread_lock_fns_.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		    evthread_lock_debugging_enabled_);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = original_lock_fns_.alloc(
		    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}


#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
evthreadimpl_get_id_()
{
	return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
evthreadimpl_lock_alloc_(unsigned locktype)
{
	return evthread_lock_fns_.alloc ?
	    evthread_lock_fns_.alloc(locktype) : NULL;
}
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
int
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.lock)
		return evthread_lock_fns_.lock(mode, lock);
	else
		return 0;
}
int
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.unlock)
		return evthread_lock_fns_.unlock(mode, lock);
	else
		return 0;
}
void *
evthreadimpl_cond_alloc_(unsigned condtype)
{
	return evthread_cond_fns_.alloc_condition ?
	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
int
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
	if (evthread_cond_fns_.signal_condition)
		return evthread_cond_fns_.signal_condition(cond, broadcast);
	else
		return 0;
}
int
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
	if (evthread_cond_fns_.wait_condition)
		return evthread_cond_fns_.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}

int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
#endif

#endif