/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
 */

/*
 * Thread locking implementation for the dynamic linker.
 *
 * We use the "simple, non-scalable reader-preference lock" from:
 *
 *   J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
 *   Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
 *   Principles and Practice of Parallel Programming, April 1991.
 *
 * In this algorithm the lock is a single word.  Its low-order bit is
 * set when a writer holds the lock.  The remaining high-order bits
 * contain a count of readers desiring the lock.  The algorithm requires
 * atomic "compare_and_store" and "add" operations, which we take
 * from machine/atomic.h.
 */
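
/*
 * Concretely, with WAFLAG == 0x1 and RC_INCR == 0x2 below, a lock word
 * of 0 means the lock is free, 0x1 means a writer holds it, and 2 * n
 * means n readers hold or are waiting for it.  A reader adds RC_INCR
 * and backs the increment out if the writer bit turns out to be set;
 * a writer compare-and-swaps the word from 0 to WAFLAG.
 */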

#include <sys/param.h>
#include <sys/signalvar.h>
#include <signal.h>
#include <stdlib.h>
#include <time.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"
#include "rtld_libc.h"

void _rtld_thread_init(struct RtldLockInfo *) __exported;
void _rtld_atfork_pre(int *) __exported;
void _rtld_atfork_post(int *) __exported;

static char def_dlerror_msg[512];
static int def_dlerror_seen_val = 1;

static char *
def_dlerror_loc(void)
{
	return (def_dlerror_msg);
}

static int *
def_dlerror_seen(void)
{
	return (&def_dlerror_seen_val);
}

#define	WAFLAG		0x1	/* A writer holds the lock */
#define	RC_INCR		0x2	/* Adjusts count of readers desiring lock */

typedef struct Struct_Lock {
	volatile u_int lock;
	void *base;
} Lock;
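
/*
 * State used to block signals while a default lock is held:
 * fullsigmask/oldsigmask and the wnested nesting counter implement the
 * sigprocmask(2) based scheme, while fsigblock is the word registered
 * with the kernel when the sigfastblock(2) mechanism is in use
 * (ld_fast_sigblock).  thread_flag backs the default
 * thread_set_flag/thread_clr_flag callbacks.
 */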
static sigset_t fullsigmask, oldsigmask;
static int thread_flag, wnested;
static uint32_t fsigblock;

static void *
def_lock_create(void)
{
	void *base;
	char *p;
	uintptr_t r;
	Lock *l;

	/*
	 * Arrange for the lock to occupy its own cache line.  First, we
	 * optimistically allocate just a cache line, hoping that malloc
	 * will give us a well-aligned block of memory.  If that doesn't
	 * work, we allocate a larger block and take a well-aligned cache
	 * line from it.
	 */
	base = xmalloc(CACHE_LINE_SIZE);
	p = base;
	if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
		free(base);
		base = xmalloc(2 * CACHE_LINE_SIZE);
		p = base;
		if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
			p += CACHE_LINE_SIZE - r;
	}
	l = (Lock *)p;
	l->base = base;
	l->lock = 0;
	return (l);
}

static void
def_lock_destroy(void *lock)
{
	Lock *l = lock;

	free(l->base);
}

static void
sig_fastunblock(void)
{
	uint32_t oldval;

	assert((fsigblock & ~SIGFASTBLOCK_FLAGS) >= SIGFASTBLOCK_INC);
	oldval = atomic_fetchadd_32(&fsigblock, -SIGFASTBLOCK_INC);
	if (oldval == (SIGFASTBLOCK_PEND | SIGFASTBLOCK_INC))
		__sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
}

static bool
def_lock_acquire_set(Lock *l, bool wlock)
{
	if (wlock) {
		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
			return (true);
	} else {
		atomic_add_acq_int(&l->lock, RC_INCR);
		if ((l->lock & WAFLAG) == 0)
			return (true);
		atomic_add_int(&l->lock, -RC_INCR);
	}
	return (false);
}
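
/*
 * Acquire the lock for reading or writing, spinning on
 * def_lock_acquire_set().  All signals stay blocked for as long as the
 * lock is held, either through the sigfastblock(2) counter or, on the
 * sigprocmask(2) path, by saving the caller's mask in oldsigmask when
 * the outermost lock is taken (tracked by wnested).
 */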
static void
def_lock_acquire(Lock *l, bool wlock)
{
	sigset_t tmp_oldsigmask;

	if (ld_fast_sigblock) {
		for (;;) {
			atomic_add_32(&fsigblock, SIGFASTBLOCK_INC);
			if (def_lock_acquire_set(l, wlock))
				break;
			sig_fastunblock();
		}
	} else {
		for (;;) {
			sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
			if (def_lock_acquire_set(l, wlock))
				break;
			sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
		}
		if (atomic_fetchadd_int(&wnested, 1) == 0)
			oldsigmask = tmp_oldsigmask;
	}
}

static void
def_rlock_acquire(void *lock)
{
	def_lock_acquire(lock, false);
}

static void
def_wlock_acquire(void *lock)
{
	def_lock_acquire(lock, true);
}

static void
def_lock_release(void *lock)
{
	Lock *l = lock;

	atomic_add_rel_int(&l->lock, -((l->lock & WAFLAG) == 0 ?
	    RC_INCR : WAFLAG));
	if (ld_fast_sigblock)
		sig_fastunblock();
	else if (atomic_fetchadd_int(&wnested, -1) == 1)
		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
}

static int
def_thread_set_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag |= mask;
	return (old_val);
}

static int
def_thread_clr_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag &= ~mask;
	return (old_val);
}

/*
 * Public interface exposed to the rest of the dynamic linker.
 */
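
/*
 * lockinfo holds the currently installed locking callbacks; deflockinfo
 * keeps the default implementation defined in this file, which
 * lockdflt_init() installs and _rtld_thread_init(NULL) falls back to.
 */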
struct RtldLockInfo lockinfo;
static struct RtldLockInfo deflockinfo;

static __inline int
thread_mask_set(int mask)
{
	return (lockinfo.thread_set_flag(mask));
}

static __inline void
thread_mask_clear(int mask)
{
	lockinfo.thread_clr_flag(mask);
}

#define	RTLD_LOCK_CNT	3
static struct rtld_lock {
	void	*handle;
	int	 mask;
} rtld_locks[RTLD_LOCK_CNT];

rtld_lock_t	rtld_bind_lock = &rtld_locks[0];
rtld_lock_t	rtld_libc_lock = &rtld_locks[1];
rtld_lock_t	rtld_phdr_lock = &rtld_locks[2];

void
rlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("rlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.rlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_RLOCKED;
}

void
wlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("wlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.wlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_WLOCKED;
}

void
lock_release(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
	case RTLD_LOCK_WLOCKED:
		thread_mask_clear(lock->mask);
		lockinfo.lock_release(lock->handle);
		break;
	default:
		assert(0);
	}
}
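
/*
 * lock_upgrade() is not an atomic upgrade: the lock is dropped and then
 * re-acquired for writing, so the protected state may change in
 * between.  lock_restart_for_upgrade() instead aborts a read-locked
 * section with siglongjmp() back to the point that established the
 * lockstate, so the operation can be restarted under a write lock.
 */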
void
lock_upgrade(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	lock_release(lock, lockstate);
	wlock_acquire(lock, lockstate);
}

void
lock_restart_for_upgrade(RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
	case RTLD_LOCK_WLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
		siglongjmp(lockstate->env, 1);
		break;
	default:
		assert(0);
	}
}

void
dlerror_dflt_init(void)
{
	lockinfo.dlerror_loc = def_dlerror_loc;
	lockinfo.dlerror_loc_sz = sizeof(def_dlerror_msg);
	lockinfo.dlerror_seen = def_dlerror_seen;
}
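
/*
 * Install the default locking implementation, used until a threads
 * library registers its own via _rtld_thread_init(), and set up the
 * signal-blocking machinery (sigfastblock(2) when ld_fast_sigblock is
 * set, otherwise a full sigprocmask(2) mask).
 */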
void
lockdflt_init(void)
{
	int i;

	deflockinfo.rtli_version = RTLI_VERSION;
	deflockinfo.lock_create = def_lock_create;
	deflockinfo.lock_destroy = def_lock_destroy;
	deflockinfo.rlock_acquire = def_rlock_acquire;
	deflockinfo.wlock_acquire = def_wlock_acquire;
	deflockinfo.lock_release = def_lock_release;
	deflockinfo.thread_set_flag = def_thread_set_flag;
	deflockinfo.thread_clr_flag = def_thread_clr_flag;
	deflockinfo.at_fork = NULL;
	deflockinfo.dlerror_loc = def_dlerror_loc;
	deflockinfo.dlerror_loc_sz = sizeof(def_dlerror_msg);
	deflockinfo.dlerror_seen = def_dlerror_seen;

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].mask = (1 << i);
		rtld_locks[i].handle = NULL;
	}

	memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo));
	_rtld_thread_init(NULL);
	if (ld_fast_sigblock) {
		__sys_sigfastblock(SIGFASTBLOCK_SETPTR, &fsigblock);
	} else {
		/*
		 * Construct a mask to block all signals.  Note that
		 * blocking traps means the process is terminated if a
		 * trap occurs while we are in a locked section, given
		 * the default setting of kern.forcesigexit.
		 */
		sigfillset(&fullsigmask);
	}
}

/*
 * Callback function that allows a threads implementation to register
 * its own locking primitives if the default ones are not suitable.
 * The current context should be the only context executing at
 * invocation time.
 */
void
_rtld_thread_init(struct RtldLockInfo *pli)
{
	const Obj_Entry *obj;
	SymLook req;
	void *locks[RTLD_LOCK_CNT];
	int flags, i, res;

	if (pli == NULL) {
		lockinfo.rtli_version = RTLI_VERSION;
	} else {
		lockinfo.rtli_version = RTLI_VERSION_ONE;
		obj = obj_from_addr(pli->lock_create);
		if (obj != NULL) {
			symlook_init(&req, "_pli_rtli_version");
			res = symlook_obj(&req, obj);
			if (res == 0)
				lockinfo.rtli_version = pli->rtli_version;
		}
	}

	/* disable all locking while this function is running */
	flags = thread_mask_set(~0);

	if (pli == NULL)
		pli = &deflockinfo;
	else if (ld_fast_sigblock) {
		fsigblock = 0;
		__sys_sigfastblock(SIGFASTBLOCK_UNSETPTR, NULL);
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++)
		if ((locks[i] = pli->lock_create()) == NULL)
			break;

	if (i < RTLD_LOCK_CNT) {
		while (--i >= 0)
			pli->lock_destroy(locks[i]);
		abort();
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		if (rtld_locks[i].handle == NULL)
			continue;
		if (flags & rtld_locks[i].mask)
			lockinfo.lock_release(rtld_locks[i].handle);
		lockinfo.lock_destroy(rtld_locks[i].handle);
	}
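
	/*
	 * Install the new locks; any lock that was held when locking was
	 * disabled above is re-acquired in write mode.
	 */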
	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].handle = locks[i];
		if (flags & rtld_locks[i].mask)
			pli->wlock_acquire(rtld_locks[i].handle);
	}

	lockinfo.lock_create = pli->lock_create;
	lockinfo.lock_destroy = pli->lock_destroy;
	lockinfo.rlock_acquire = pli->rlock_acquire;
	lockinfo.wlock_acquire = pli->wlock_acquire;
	lockinfo.lock_release = pli->lock_release;
	lockinfo.thread_set_flag = pli->thread_set_flag;
	lockinfo.thread_clr_flag = pli->thread_clr_flag;
	lockinfo.at_fork = pli->at_fork;
	if (lockinfo.rtli_version > RTLI_VERSION_ONE && pli != NULL) {
		strlcpy(pli->dlerror_loc(), lockinfo.dlerror_loc(),
		    lockinfo.dlerror_loc_sz);
		lockinfo.dlerror_loc = pli->dlerror_loc;
		lockinfo.dlerror_loc_sz = pli->dlerror_loc_sz;
		lockinfo.dlerror_seen = pli->dlerror_seen;
	}

	/* restore thread locking state, this time with new locks */
	thread_mask_clear(~0);
	thread_mask_set(flags);
	dbg("_rtld_thread_init: done");
}

void
_rtld_atfork_pre(int *locks)
{
	RtldLockState ls[2];

	if (locks == NULL)
		return;
	bzero(ls, sizeof(ls));

	/*
	 * Warning: this did not work well with the rtld compat locks
	 * above: the thread signal mask was corrupted (set to block all
	 * signals) when two locks were taken simultaneously in write
	 * mode.  The caller of _rtld_atfork_pre() must provide a working
	 * lock implementation anyway, and the libthr locks are fine.
	 */
	if (ld_get_env_var(LD_NO_DL_ITERATE_PHDR_AFTER_FORK) == NULL)
		wlock_acquire(rtld_phdr_lock, &ls[0]);
	wlock_acquire(rtld_bind_lock, &ls[1]);

	/* XXXKIB: I am really sorry for this. */
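	/*
	 * The state for each lock is stored at that lock's index in
	 * rtld_locks[] (bind lock == slot 0, phdr lock == slot 2), which
	 * is the layout _rtld_atfork_post() reads back.
	 */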
	locks[0] = ls[1].lockstate;
	locks[2] = ls[0].lockstate;
}

void
_rtld_atfork_post(int *locks)
{
	RtldLockState ls[2];

	if (locks == NULL)
		return;

	bzero(ls, sizeof(ls));
	ls[0].lockstate = locks[2];
	ls[1].lockstate = locks[0];
	lock_release(rtld_bind_lock, &ls[1]);
	if (ld_get_env_var(LD_NO_DL_ITERATE_PHDR_AFTER_FORK) == NULL)
		lock_release(rtld_phdr_lock, &ls[0]);
}