/*	$NetBSD: evthread-internal.h,v 1.7 2024/08/18 20:47:21 christos Exp $	*/

/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVTHREAD_INTERNAL_H_INCLUDED_
#define EVTHREAD_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include "event2/thread.h"
#include "util-internal.h"

struct event_base;

#if !defined(_WIN32) && !defined(__CYGWIN__)
/* On Windows, the way we currently make DLLs, it's not allowed for us to
 * have shared global structures. Thus, we only do the direct-call-to-function
 * code path if we know that the local shared library system supports it.
 */
#define EVTHREAD_EXPOSE_STRUCTS
#endif

#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
   enabled. */
EVENT2_EXPORT_SYMBOL
extern struct evthread_lock_callbacks evthread_lock_fns_;
EVENT2_EXPORT_SYMBOL
extern struct evthread_condition_callbacks evthread_cond_fns_;
extern unsigned long (*evthread_id_fn_)(void);
EVENT2_EXPORT_SYMBOL
extern int evthread_lock_debugging_enabled_;

/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(evthread_id_fn_ ? evthread_id_fn_() : 1)

/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base) \
	(evthread_id_fn_ == NULL || \
	 (base)->th_owner_id == evthread_id_fn_())

/** Return true iff we need to notify the base's main thread about changes to
 * its state, because it's currently running the main loop in another
 * thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
	(evthread_id_fn_ != NULL && \
	 (base)->running_loop && \
	 (base)->th_owner_id != evthread_id_fn_())

/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = evthread_lock_fns_.alloc ? \
	    evthread_lock_fns_.alloc(locktype) : NULL)

/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *lock_tmp_ = (lockvar); \
		if (lock_tmp_ && evthread_lock_fns_.free) \
			evthread_lock_fns_.free(lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthread_lock_fns_.lock(mode, lockvar); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthread_lock_fns_.unlock(mode, lockvar); \
	} while (0)
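
/*
 * Illustrative sketch, not part of this header: the usual lifecycle of a
 * lock driven through the macros above.  The variable "my_lock" and the
 * surrounding code are hypothetical; EVTHREAD_LOCKTYPE_RECURSIVE and the
 * mode argument 0 are taken from event2/thread.h and existing callers.
 *
 *	void *my_lock = NULL;
 *	EVTHREAD_ALLOC_LOCK(my_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 *	// my_lock stays NULL when no locking callbacks are configured; the
 *	// macros below then do nothing.
 *	EVLOCK_LOCK(my_lock, 0);
 *	// ... touch state shared between threads ...
 *	EVLOCK_UNLOCK(my_lock, 0);
 *	EVTHREAD_FREE_LOCK(my_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 */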

/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
	do { \
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
			void *tmp = lockvar1; \
			lockvar1 = lockvar2; \
			lockvar2 = tmp; \
		} \
	} while (0)

/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
		EVLOCK_LOCK((base)->lockvar, 0); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
		EVLOCK_UNLOCK((base)->lockvar, 0); \
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
	do { \
		if ((lock) && evthread_lock_debugging_enabled_) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		} \
	} while (0)

/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
 * manage to get it. */
static inline int EVLOCK_TRY_LOCK_(void *lock);
static inline int
EVLOCK_TRY_LOCK_(void *lock)
{
	if (lock && evthread_lock_fns_.lock) {
		int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this thing;
		 * of course we count as having the lock. */
		return 1;
	}
}

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
	do { \
		(condvar) = evthread_cond_fns_.alloc_condition ? \
		    evthread_cond_fns_.alloc_condition(0) : NULL; \
	} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
	do { \
		if (cond) \
			evthread_cond_fns_.free_condition((cond)); \
	} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
 * holding 'lock'. The lock will be released until the condition is
 * signalled, at which point it will be acquired again. Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
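
/*
 * Illustrative sketch, assuming a hypothetical predicate "queue_is_empty"
 * guarded by "lock", with "cond" already allocated via EVTHREAD_ALLOC_COND.
 * As the comments above note, the wait must happen with the lock held; and
 * since a waiter may also be woken by a broadcast or a spurious wakeup of
 * the underlying condition variable, callers re-check the predicate in a
 * loop:
 *
 *	EVLOCK_LOCK(lock, 0);
 *	while (queue_is_empty)
 *		EVTHREAD_COND_WAIT(cond, lock);
 *	// ... consume the item the predicate guards ...
 *	EVLOCK_UNLOCK(lock, 0);
 *
 * The other side changes the predicate while holding "lock", then calls
 * EVTHREAD_COND_SIGNAL(cond) or EVTHREAD_COND_BROADCAST(cond).
 */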

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED() \
	(evthread_lock_fns_.lock != NULL)

#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)

unsigned long evthreadimpl_get_id_(void);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_is_lock_debugging_enabled_(void);
EVENT2_EXPORT_SYMBOL
void *evthreadimpl_lock_alloc_(unsigned locktype);
EVENT2_EXPORT_SYMBOL
void evthreadimpl_lock_free_(void *lock, unsigned locktype);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_lock_lock_(unsigned mode, void *lock);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
EVENT2_EXPORT_SYMBOL
void *evthreadimpl_cond_alloc_(unsigned condtype);
EVENT2_EXPORT_SYMBOL
void evthreadimpl_cond_free_(void *cond);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_cond_signal_(void *cond, int broadcast);
EVENT2_EXPORT_SYMBOL
int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
int evthreadimpl_locking_enabled_(void);

#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
#define EVBASE_IN_THREAD(base) \
	((base)->th_owner_id == evthreadimpl_get_id_())
#define EVBASE_NEED_NOTIFY(base) \
	((base)->running_loop && \
	    ((base)->th_owner_id != evthreadimpl_get_id_()))

#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = evthreadimpl_lock_alloc_(locktype))

#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *lock_tmp_ = (lockvar); \
		if (lock_tmp_) \
			evthreadimpl_lock_free_(lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthreadimpl_lock_lock_(mode, lockvar); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthreadimpl_lock_unlock_(mode, lockvar); \
	} while (0)

/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
		EVLOCK_LOCK((base)->lockvar, 0); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
		EVLOCK_UNLOCK((base)->lockvar, 0); \
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
	do { \
		if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		} \
	} while (0)

/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
 * manage to get it. */
static inline int EVLOCK_TRY_LOCK_(void *lock);
static inline int
EVLOCK_TRY_LOCK_(void *lock)
{
	if (lock) {
		int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this thing;
		 * of course we count as having the lock. */
		return 1;
	}
}

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
	do { \
		(condvar) = evthreadimpl_cond_alloc_(0); \
	} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
	do { \
		if (cond) \
			evthreadimpl_cond_free_((cond)); \
	} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
	( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
	( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
 * holding 'lock'. The lock will be released until the condition is
 * signalled, at which point it will be acquired again. Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )

#define EVTHREAD_LOCKING_ENABLED() \
	(evthreadimpl_locking_enabled_())

#else /* EVENT__DISABLE_THREAD_SUPPORT */

#define EVTHREAD_GET_ID() 1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_

#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_

#define EVBASE_IN_THREAD(base) 1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_

#define EVLOCK_TRY_LOCK_(lock) 1

#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_

#define EVTHREAD_LOCKING_ENABLED() 0

#endif

/* This code is shared between both lock impls */
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
	do { \
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
			void *tmp = lockvar1; \
			lockvar1 = lockvar2; \
			lockvar2 = tmp; \
		} \
	} while (0)

/** Acquire both lock1 and lock2. Always acquires the locks in the same
 * order, so that two threads locking two locks with LOCK2 will not deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *lock1_tmplock_ = (lock1); \
		void *lock2_tmplock_ = (lock2); \
		EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
		EVLOCK_LOCK(lock1_tmplock_,mode1); \
		if (lock2_tmplock_ != lock1_tmplock_) \
			EVLOCK_LOCK(lock2_tmplock_,mode2); \
	} while (0)
/** Release both lock1 and lock2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *lock1_tmplock_ = (lock1); \
		void *lock2_tmplock_ = (lock2); \
		EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
		if (lock2_tmplock_ != lock1_tmplock_) \
			EVLOCK_UNLOCK(lock2_tmplock_,mode2); \
		EVLOCK_UNLOCK(lock1_tmplock_,mode1); \
	} while (0)
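
/*
 * Illustrative sketch with hypothetical locks "lock_a" and "lock_b":
 * EVLOCK_LOCK2 sorts the two pointers before acquiring, so two threads that
 * pass the same pair in opposite argument order still take the locks in one
 * global order and cannot deadlock against each other.
 *
 *	EVLOCK_LOCK2(lock_a, lock_b, 0, 0);
 *	// ... work on state protected by both locks ...
 *	EVLOCK_UNLOCK2(lock_a, lock_b, 0, 0);
 */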

EVENT2_EXPORT_SYMBOL
int evthread_is_debug_lock_held_(void *lock);
void *evthread_debug_get_real_lock_(void *lock);

void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
    int enable_locks);

#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \
	do { \
		lockvar = evthread_setup_global_lock_(lockvar, \
		    (locktype), enable_locks); \
		if (!lockvar) { \
			event_warn("Couldn't allocate %s", #lockvar); \
			return -1; \
		} \
	} while (0);

int event_global_setup_locks_(const int enable_locks);
int evsig_global_setup_locks_(const int enable_locks);
int evutil_global_setup_locks_(const int enable_locks);
int evutil_secure_rng_global_setup_locks_(const int enable_locks);

/** Return current evthread_lock_callbacks */
EVENT2_EXPORT_SYMBOL
struct evthread_lock_callbacks *evthread_get_lock_callbacks(void);
/** Return current evthread_condition_callbacks */
struct evthread_condition_callbacks *evthread_get_condition_callbacks(void);
/** Disable locking for internal usage (like global shutdown) */
void evthreadimpl_disable_lock_debugging_(void);

#endif

#ifdef __cplusplus
}
#endif

#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */