1*f66ccdf0Skamil /* $NetBSD: pthread.c,v 1.164 2020/02/08 17:06:03 kamil Exp $ */ 2c62a74e6Sthorpej 3c62a74e6Sthorpej /*- 4edf01486Sad * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008, 2020 5edf01486Sad * The NetBSD Foundation, Inc. 6c62a74e6Sthorpej * All rights reserved. 7c62a74e6Sthorpej * 8c62a74e6Sthorpej * This code is derived from software contributed to The NetBSD Foundation 91ac6a89bSad * by Nathan J. Williams and Andrew Doran. 10c62a74e6Sthorpej * 11c62a74e6Sthorpej * Redistribution and use in source and binary forms, with or without 12c62a74e6Sthorpej * modification, are permitted provided that the following conditions 13c62a74e6Sthorpej * are met: 14c62a74e6Sthorpej * 1. Redistributions of source code must retain the above copyright 15c62a74e6Sthorpej * notice, this list of conditions and the following disclaimer. 16c62a74e6Sthorpej * 2. Redistributions in binary form must reproduce the above copyright 17c62a74e6Sthorpej * notice, this list of conditions and the following disclaimer in the 18c62a74e6Sthorpej * documentation and/or other materials provided with the distribution. 19c62a74e6Sthorpej * 20c62a74e6Sthorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21c62a74e6Sthorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22c62a74e6Sthorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23c62a74e6Sthorpej * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24c62a74e6Sthorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25c62a74e6Sthorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26c62a74e6Sthorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27c62a74e6Sthorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28c62a74e6Sthorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29c62a74e6Sthorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30c62a74e6Sthorpej * POSSIBILITY OF SUCH DAMAGE. 31c62a74e6Sthorpej */ 32c62a74e6Sthorpej 33f043c0fbSlukem #include <sys/cdefs.h> 34*f66ccdf0Skamil __RCSID("$NetBSD: pthread.c,v 1.164 2020/02/08 17:06:03 kamil Exp $"); 359e287199Sad 369e287199Sad #define __EXPOSE_STACK 1 379e287199Sad 389e287199Sad #include <sys/param.h> 39a5644095Sjoerg #include <sys/exec_elf.h> 409e287199Sad #include <sys/mman.h> 410645d95bSjoerg #include <sys/lwp.h> 4266ac2ffaSad #include <sys/lwpctl.h> 432bf5f9dbSdsl #include <sys/resource.h> 445f391f4aSjoerg #include <sys/sysctl.h> 45aad59997Sjoerg #include <sys/tls.h> 465f391f4aSjoerg #include <uvm/uvm_param.h> 47f043c0fbSlukem 48a5644095Sjoerg #include <assert.h> 49a5644095Sjoerg #include <dlfcn.h> 50c62a74e6Sthorpej #include <err.h> 51c62a74e6Sthorpej #include <errno.h> 52c62a74e6Sthorpej #include <lwp.h> 53c62a74e6Sthorpej #include <signal.h> 548bcff70bSnathanw #include <stdio.h> 55c62a74e6Sthorpej #include <stdlib.h> 56d79b47c3Srmind #include <stddef.h> 57c62a74e6Sthorpej #include <string.h> 580172694eSnathanw #include <syslog.h> 59c62a74e6Sthorpej #include <ucontext.h> 608bcff70bSnathanw #include <unistd.h> 61c62a74e6Sthorpej #include <sched.h> 629e287199Sad 63e5678be8Sjoerg #include "atexit.h" 64c62a74e6Sthorpej #include "pthread.h" 65c62a74e6Sthorpej #include "pthread_int.h" 66082d249aSpooka #include "pthread_makelwp.h" 6771d484f9Schristos 
#include "reentrant.h" 68c62a74e6Sthorpej 699583eeb2Sad pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER; 70d79b47c3Srmind static rb_tree_t pthread__alltree; 719583eeb2Sad 72d79b47c3Srmind static signed int pthread__cmp(void *, const void *, const void *); 73d79b47c3Srmind 74d79b47c3Srmind static const rb_tree_ops_t pthread__alltree_ops = { 75d79b47c3Srmind .rbto_compare_nodes = pthread__cmp, 76d79b47c3Srmind .rbto_compare_key = pthread__cmp, 77d79b47c3Srmind .rbto_node_offset = offsetof(struct __pthread_st, pt_alltree), 78d79b47c3Srmind .rbto_context = NULL 79d79b47c3Srmind }; 809583eeb2Sad 8161cac435Sad static void pthread__create_tramp(void *); 8250fa8db4Sad static void pthread__initthread(pthread_t); 83b8833ff5Sad static void pthread__scrubthread(pthread_t, char *, int); 849e287199Sad static void pthread__initmain(pthread_t *); 857de9da97Sad static void pthread__fork_callback(void); 86989565f8Sad static void pthread__reap(pthread_t); 87c6409540Schristos static void pthread__child_callback(void); 88c6409540Schristos static void pthread__start(void); 89c62a74e6Sthorpej 9015e9cec1Sad void pthread__init(void); 9115e9cec1Sad 92c62a74e6Sthorpej int pthread__started; 9371d484f9Schristos int __uselibcstub = 1; 94f4fd6b79Sad pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER; 9550fa8db4Sad pthread_queue_t pthread__deadqueue; 96f1b2c1c4Sad pthread_queue_t pthread__allqueue; 97c62a74e6Sthorpej 98c62a74e6Sthorpej static pthread_attr_t pthread_default_attr; 9966ac2ffaSad static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE }; 100c62a74e6Sthorpej 1010172694eSnathanw enum { 1020172694eSnathanw DIAGASSERT_ABORT = 1<<0, 1030172694eSnathanw DIAGASSERT_STDERR = 1<<1, 1040172694eSnathanw DIAGASSERT_SYSLOG = 1<<2 1050172694eSnathanw }; 106df277271Snathanw 107beaa63b6Sad static int pthread__diagassert; 108df277271Snathanw 109c94f5a91Sad int pthread__concurrency; 110c94f5a91Sad int pthread__nspins; 111ded26025Sad int 
pthread__unpark_max = PTHREAD__UNPARK_MAX; 1120f48379fSchristos int pthread__dbg; /* set by libpthread_dbg if active */ 113f2f10664Scl 1149e287199Sad /* 1159e287199Sad * We have to initialize the pthread_stack* variables here because 1169e287199Sad * mutexes are used before pthread_init() and thus pthread__initmain() 1179e287199Sad * are called. Since mutexes only save the stack pointer and not a 1189e287199Sad * pointer to the thread data, it is safe to change the mapping from 1199e287199Sad * stack pointer to thread data afterwards. 1209e287199Sad */ 121a5644095Sjoerg size_t pthread__stacksize; 1225f391f4aSjoerg size_t pthread__guardsize; 123a5644095Sjoerg size_t pthread__pagesize; 124841339f0Smanu static struct __pthread_st *pthread__main; 125841339f0Smanu static size_t __pthread_st_size; 1269e287199Sad 127f782e995Sdrochner int _sys___sigprocmask14(int, const sigset_t *, sigset_t *); 128f782e995Sdrochner 129c62a74e6Sthorpej __strong_alias(__libc_thr_self,pthread_self) 1307dc01dbfSthorpej __strong_alias(__libc_thr_create,pthread_create) 1317dc01dbfSthorpej __strong_alias(__libc_thr_exit,pthread_exit) 132c62a74e6Sthorpej __strong_alias(__libc_thr_errno,pthread__errno) 1339e5c8705Snathanw __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate) 134095b25e7Sdrochner __strong_alias(__libc_thr_equal,pthread_equal) 13515e9cec1Sad __strong_alias(__libc_thr_init,pthread__init) 136c62a74e6Sthorpej 137c62a74e6Sthorpej /* 138c62a74e6Sthorpej * Static library kludge. Place a reference to a symbol any library 139c62a74e6Sthorpej * file which does not already have a reference here. 
140c62a74e6Sthorpej */ 141c62a74e6Sthorpej extern int pthread__cancel_stub_binder; 142c62a74e6Sthorpej 143c62a74e6Sthorpej void *pthread__static_lib_binder[] = { 144c62a74e6Sthorpej &pthread__cancel_stub_binder, 145c62a74e6Sthorpej pthread_cond_init, 146c62a74e6Sthorpej pthread_mutex_init, 147c62a74e6Sthorpej pthread_rwlock_init, 148c62a74e6Sthorpej pthread_barrier_init, 149c62a74e6Sthorpej pthread_key_create, 150bd9a18b7Snathanw pthread_setspecific, 151c62a74e6Sthorpej }; 152c62a74e6Sthorpej 1532bcb8bf1Sad #define NHASHLOCK 64 1542bcb8bf1Sad 1552bcb8bf1Sad static union hashlock { 1562bcb8bf1Sad pthread_mutex_t mutex; 1572bcb8bf1Sad char pad[64]; 1582bcb8bf1Sad } hashlocks[NHASHLOCK] __aligned(64); 1592bcb8bf1Sad 160c62a74e6Sthorpej /* 161c62a74e6Sthorpej * This needs to be started by the library loading code, before main() 162c62a74e6Sthorpej * gets to run, for various things that use the state of the initial thread 163c62a74e6Sthorpej * to work properly (thread-specific data is an application-visible example; 164c62a74e6Sthorpej * spinlock counts for mutexes is an internal example). 165c62a74e6Sthorpej */ 166c62a74e6Sthorpej void 16715e9cec1Sad pthread__init(void) 168c62a74e6Sthorpej { 169c62a74e6Sthorpej pthread_t first; 1700172694eSnathanw char *p; 1710645d95bSjoerg int i; 1725f391f4aSjoerg int mib[2]; 1735f391f4aSjoerg unsigned int value; 1745f391f4aSjoerg size_t len; 175c62a74e6Sthorpej extern int __isthreaded; 176c62a74e6Sthorpej 177841339f0Smanu /* 178841339f0Smanu * Allocate pthread_keys descriptors before 179841339f0Smanu * reseting __uselibcstub because otherwise 180841339f0Smanu * malloc() will call pthread_keys_create() 181841339f0Smanu * while pthread_keys descriptors are not 182841339f0Smanu * yet allocated. 
183841339f0Smanu */ 184cb27e655Schristos pthread__main = pthread_tsd_init(&__pthread_st_size); 185cb27e655Schristos if (pthread__main == NULL) 186cb27e655Schristos err(EXIT_FAILURE, "Cannot allocate pthread storage"); 187841339f0Smanu 18871d484f9Schristos __uselibcstub = 0; 18971d484f9Schristos 190a5644095Sjoerg pthread__pagesize = (size_t)sysconf(_SC_PAGESIZE); 191284dc1a8Schristos pthread__concurrency = (int)sysconf(_SC_NPROCESSORS_CONF); 192c3f8e2eeSad 1935f391f4aSjoerg mib[0] = CTL_VM; 1945f391f4aSjoerg mib[1] = VM_THREAD_GUARD_SIZE; 1955f391f4aSjoerg len = sizeof(value); 1965f391f4aSjoerg if (sysctl(mib, __arraycount(mib), &value, &len, NULL, 0) == 0) 1975f391f4aSjoerg pthread__guardsize = value; 1985f391f4aSjoerg else 1995f391f4aSjoerg pthread__guardsize = pthread__pagesize; 2005f391f4aSjoerg 201c62a74e6Sthorpej /* Initialize locks first; they're needed elsewhere. */ 202b8833ff5Sad pthread__lockprim_init(); 2032bcb8bf1Sad for (i = 0; i < NHASHLOCK; i++) { 2042bcb8bf1Sad pthread_mutex_init(&hashlocks[i].mutex, NULL); 2052bcb8bf1Sad } 206f2f10664Scl 207b8833ff5Sad /* Fetch parameters. 
*/ 2083247035dSad i = (int)_lwp_unpark_all(NULL, 0, NULL); 2093247035dSad if (i == -1) 210841339f0Smanu err(EXIT_FAILURE, "_lwp_unpark_all"); 211ded26025Sad if (i < pthread__unpark_max) 212ded26025Sad pthread__unpark_max = i; 213c62a74e6Sthorpej 214c62a74e6Sthorpej /* Basic data structure setup */ 215c62a74e6Sthorpej pthread_attr_init(&pthread_default_attr); 216f1b2c1c4Sad PTQ_INIT(&pthread__allqueue); 217c62a74e6Sthorpej PTQ_INIT(&pthread__deadqueue); 218d79b47c3Srmind 219d79b47c3Srmind rb_tree_init(&pthread__alltree, &pthread__alltree_ops); 2209e287199Sad 221c62a74e6Sthorpej /* Create the thread structure corresponding to main() */ 222c62a74e6Sthorpej pthread__initmain(&first); 22350fa8db4Sad pthread__initthread(first); 224b8833ff5Sad pthread__scrubthread(first, NULL, 0); 2251ac6a89bSad 2261ac6a89bSad first->pt_lid = _lwp_self(); 227f1b2c1c4Sad PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq); 228d79b47c3Srmind (void)rb_tree_insert_node(&pthread__alltree, first); 229c62a74e6Sthorpej 2307de9da97Sad if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) { 231841339f0Smanu err(EXIT_FAILURE, "_lwp_ctl"); 2327de9da97Sad } 2337de9da97Sad 234c62a74e6Sthorpej /* Start subsystems */ 235c62a74e6Sthorpej PTHREAD_MD_INIT 236c62a74e6Sthorpej 23715e9cec1Sad for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) { 2380172694eSnathanw switch (*p) { 2390172694eSnathanw case 'a': 2400172694eSnathanw pthread__diagassert |= DIAGASSERT_ABORT; 2410172694eSnathanw break; 2420172694eSnathanw case 'A': 2430172694eSnathanw pthread__diagassert &= ~DIAGASSERT_ABORT; 2440172694eSnathanw break; 2450172694eSnathanw case 'e': 2460172694eSnathanw pthread__diagassert |= DIAGASSERT_STDERR; 2470172694eSnathanw break; 2480172694eSnathanw case 'E': 2490172694eSnathanw pthread__diagassert &= ~DIAGASSERT_STDERR; 2500172694eSnathanw break; 2510172694eSnathanw case 'l': 2520172694eSnathanw pthread__diagassert |= DIAGASSERT_SYSLOG; 2530172694eSnathanw break; 2540172694eSnathanw case 'L': 
2550172694eSnathanw pthread__diagassert &= ~DIAGASSERT_SYSLOG; 2560172694eSnathanw break; 257df277271Snathanw } 2580172694eSnathanw } 2590172694eSnathanw 260c62a74e6Sthorpej /* Tell libc that we're here and it should role-play accordingly. */ 2617de9da97Sad pthread_atfork(NULL, NULL, pthread__fork_callback); 262c62a74e6Sthorpej __isthreaded = 1; 263c62a74e6Sthorpej } 264c62a74e6Sthorpej 2652a4cef11Snathanw static void 2667de9da97Sad pthread__fork_callback(void) 2677de9da97Sad { 2683cdda339Senami struct __pthread_st *self = pthread__self(); 2697de9da97Sad 2707de9da97Sad /* lwpctl state is not copied across fork. */ 2713cdda339Senami if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) { 272841339f0Smanu err(EXIT_FAILURE, "_lwp_ctl"); 2737de9da97Sad } 274fc70b598Sexplorer self->pt_lid = _lwp_self(); 2757de9da97Sad } 2767de9da97Sad 2777de9da97Sad static void 2782a4cef11Snathanw pthread__child_callback(void) 2792a4cef11Snathanw { 28066ac2ffaSad 2812a4cef11Snathanw /* 2822a4cef11Snathanw * Clean up data structures that a forked child process might 2832a4cef11Snathanw * trip over. Note that if threads have been created (causing 2842a4cef11Snathanw * this handler to be registered) the standards say that the 2852a4cef11Snathanw * child will trigger undefined behavior if it makes any 2862a4cef11Snathanw * pthread_* calls (or any other calls that aren't 2872a4cef11Snathanw * async-signal-safe), so we don't really have to clean up 2882a4cef11Snathanw * much. Anything that permits some pthread_* calls to work is 2892a4cef11Snathanw * merely being polite. 
2902a4cef11Snathanw */ 2912a4cef11Snathanw pthread__started = 0; 2922a4cef11Snathanw } 293c62a74e6Sthorpej 2940e675542Schs static void 295c62a74e6Sthorpej pthread__start(void) 296c62a74e6Sthorpej { 297c62a74e6Sthorpej 298ff14fbf2Snathanw /* 299ff14fbf2Snathanw * Per-process timers are cleared by fork(); despite the 300ff14fbf2Snathanw * various restrictions on fork() and threads, it's legal to 301ff14fbf2Snathanw * fork() before creating any threads. 302ff14fbf2Snathanw */ 3032a4cef11Snathanw pthread_atfork(NULL, NULL, pthread__child_callback); 304c62a74e6Sthorpej } 305c62a74e6Sthorpej 306c62a74e6Sthorpej 307c62a74e6Sthorpej /* General-purpose thread data structure sanitization. */ 30850fa8db4Sad /* ARGSUSED */ 30950fa8db4Sad static void 31050fa8db4Sad pthread__initthread(pthread_t t) 311c62a74e6Sthorpej { 312c62a74e6Sthorpej 31315e9cec1Sad t->pt_self = t; 314c62a74e6Sthorpej t->pt_magic = PT_MAGIC; 315c3f8e2eeSad t->pt_willpark = 0; 316c3f8e2eeSad t->pt_unpark = 0; 3178ccc6e06Sad t->pt_nwaiters = 0; 318c3f8e2eeSad t->pt_sleepobj = NULL; 319c3f8e2eeSad t->pt_signalled = 0; 320b8833ff5Sad t->pt_havespecific = 0; 3218ccc6e06Sad t->pt_early = NULL; 32266ac2ffaSad t->pt_lwpctl = &pthread__dummy_lwpctl; 3231ac6a89bSad 32415e9cec1Sad memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops)); 325f4fd6b79Sad pthread_mutex_init(&t->pt_lock, NULL); 326c62a74e6Sthorpej PTQ_INIT(&t->pt_cleanup_stack); 327c62a74e6Sthorpej } 328c62a74e6Sthorpej 329b8833ff5Sad static void 330b8833ff5Sad pthread__scrubthread(pthread_t t, char *name, int flags) 331b8833ff5Sad { 332b8833ff5Sad 333b8833ff5Sad t->pt_state = PT_STATE_RUNNING; 334b8833ff5Sad t->pt_exitval = NULL; 335b8833ff5Sad t->pt_flags = flags; 336b8833ff5Sad t->pt_cancel = 0; 337b8833ff5Sad t->pt_errno = 0; 338b8833ff5Sad t->pt_name = name; 339b8833ff5Sad t->pt_lid = 0; 340b8833ff5Sad } 341b8833ff5Sad 342a5644095Sjoerg static int 343d3660af7Sjoerg pthread__getstack(pthread_t newthread, const pthread_attr_t *attr) 
344a5644095Sjoerg { 345d3660af7Sjoerg void *stackbase, *stackbase2, *redzone; 3461d34190eSjoerg size_t stacksize, guardsize; 347d3660af7Sjoerg bool allocated; 348a5644095Sjoerg 349344a2311Sjoerg if (attr != NULL) { 350344a2311Sjoerg pthread_attr_getstack(attr, &stackbase, &stacksize); 3515f391f4aSjoerg pthread_attr_getguardsize(attr, &guardsize); 352344a2311Sjoerg } else { 353344a2311Sjoerg stackbase = NULL; 354344a2311Sjoerg stacksize = 0; 3555f391f4aSjoerg guardsize = pthread__guardsize; 356344a2311Sjoerg } 357344a2311Sjoerg if (stacksize == 0) 358344a2311Sjoerg stacksize = pthread__stacksize; 359344a2311Sjoerg 360d3660af7Sjoerg if (newthread->pt_stack_allocated) { 361e917deeeSdrochner if (stackbase == NULL && 3625f391f4aSjoerg newthread->pt_stack.ss_size == stacksize && 3635f391f4aSjoerg newthread->pt_guardsize == guardsize) 364d3660af7Sjoerg return 0; 365d3660af7Sjoerg stackbase2 = newthread->pt_stack.ss_sp; 366ac02e870Skamil #ifndef __MACHINE_STACK_GROWS_UP 367d3660af7Sjoerg stackbase2 = (char *)stackbase2 - newthread->pt_guardsize; 368d3660af7Sjoerg #endif 369d3660af7Sjoerg munmap(stackbase2, 370d3660af7Sjoerg newthread->pt_stack.ss_size + newthread->pt_guardsize); 371d3660af7Sjoerg newthread->pt_stack.ss_sp = NULL; 372d3660af7Sjoerg newthread->pt_stack.ss_size = 0; 373d3660af7Sjoerg newthread->pt_guardsize = 0; 374d3660af7Sjoerg newthread->pt_stack_allocated = false; 375d3660af7Sjoerg } 376d3660af7Sjoerg 377d3660af7Sjoerg newthread->pt_stack_allocated = false; 378d3660af7Sjoerg 379344a2311Sjoerg if (stackbase == NULL) { 380d3660af7Sjoerg stacksize = ((stacksize - 1) | (pthread__pagesize - 1)) + 1; 3815f391f4aSjoerg guardsize = ((guardsize - 1) | (pthread__pagesize - 1)) + 1; 3821d34190eSjoerg stackbase = mmap(NULL, stacksize + guardsize, 383a5644095Sjoerg PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, (off_t)0); 384a5644095Sjoerg if (stackbase == MAP_FAILED) 385a5644095Sjoerg return ENOMEM; 386d3660af7Sjoerg allocated = true; 3871d34190eSjoerg } else { 
388d3660af7Sjoerg allocated = false; 389344a2311Sjoerg } 390a5644095Sjoerg #ifdef __MACHINE_STACK_GROWS_UP 391d3660af7Sjoerg redzone = (char *)stackbase + stacksize; 392d3660af7Sjoerg stackbase2 = (char *)stackbase; 393a5644095Sjoerg #else 394a5644095Sjoerg redzone = (char *)stackbase; 395d3660af7Sjoerg stackbase2 = (char *)stackbase + guardsize; 396a5644095Sjoerg #endif 397d3660af7Sjoerg if (allocated && guardsize && 398d3660af7Sjoerg mprotect(redzone, guardsize, PROT_NONE) == -1) { 3991d34190eSjoerg munmap(stackbase, stacksize + guardsize); 400a5644095Sjoerg return EPERM; 401a5644095Sjoerg } 402d3660af7Sjoerg newthread->pt_stack.ss_size = stacksize; 403d3660af7Sjoerg newthread->pt_stack.ss_sp = stackbase2; 404d3660af7Sjoerg newthread->pt_guardsize = guardsize; 405d3660af7Sjoerg newthread->pt_stack_allocated = allocated; 406a5644095Sjoerg return 0; 407a5644095Sjoerg } 408c62a74e6Sthorpej 409c62a74e6Sthorpej int 410c62a74e6Sthorpej pthread_create(pthread_t *thread, const pthread_attr_t *attr, 411c62a74e6Sthorpej void *(*startfunc)(void *), void *arg) 412c62a74e6Sthorpej { 413d9adedd7Sad pthread_t newthread; 414c62a74e6Sthorpej pthread_attr_t nattr; 415b33971b9Sthorpej struct pthread_attr_private *p; 4163ca3d0b1Schristos char * volatile name; 417ed964af1Sad unsigned long flag; 418aad59997Sjoerg void *private_area; 419ed964af1Sad int ret; 420c62a74e6Sthorpej 42171d484f9Schristos if (__predict_false(__uselibcstub)) { 42271d484f9Schristos pthread__errorfunc(__FILE__, __LINE__, __func__, 42371d484f9Schristos "pthread_create() requires linking with -lpthread"); 42471d484f9Schristos return __libc_thr_create_stub(thread, attr, startfunc, arg); 42571d484f9Schristos } 42671d484f9Schristos 427c62a74e6Sthorpej /* 428c62a74e6Sthorpej * It's okay to check this without a lock because there can 429c62a74e6Sthorpej * only be one thread before it becomes true. 
430c62a74e6Sthorpej */ 431c62a74e6Sthorpej if (pthread__started == 0) { 432c62a74e6Sthorpej pthread__start(); 433c62a74e6Sthorpej pthread__started = 1; 434c62a74e6Sthorpej } 435c62a74e6Sthorpej 436c62a74e6Sthorpej if (attr == NULL) 437c62a74e6Sthorpej nattr = pthread_default_attr; 438e81f9f17Sdrochner else if (attr->pta_magic == PT_ATTR_MAGIC) 439c62a74e6Sthorpej nattr = *attr; 440c62a74e6Sthorpej else 441c62a74e6Sthorpej return EINVAL; 442c62a74e6Sthorpej 443b33971b9Sthorpej /* Fetch misc. attributes from the attr structure. */ 444508a50acSnathanw name = NULL; 445508a50acSnathanw if ((p = nattr.pta_private) != NULL) 446508a50acSnathanw if (p->ptap_name[0] != '\0') 447b33971b9Sthorpej if ((name = strdup(p->ptap_name)) == NULL) 448b33971b9Sthorpej return ENOMEM; 449c62a74e6Sthorpej 450a014cf23Sad newthread = NULL; 451c62a74e6Sthorpej 452b8833ff5Sad /* 453b8833ff5Sad * Try to reclaim a dead thread. 454b8833ff5Sad */ 455a014cf23Sad if (!PTQ_EMPTY(&pthread__deadqueue)) { 456f4fd6b79Sad pthread_mutex_lock(&pthread__deadqueue_lock); 45785ddadbfSchristos PTQ_FOREACH(newthread, &pthread__deadqueue, pt_deadq) { 458cd1754abSad /* Still busily exiting, or finished? */ 45985ddadbfSchristos if (newthread->pt_lwpctl->lc_curcpu == 460cd1754abSad LWPCTL_CPU_EXITED) 46185ddadbfSchristos break; 46250fa8db4Sad } 46385ddadbfSchristos if (newthread) 46485ddadbfSchristos PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq); 465f4fd6b79Sad pthread_mutex_unlock(&pthread__deadqueue_lock); 466928f301bSjoerg #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) 467928f301bSjoerg if (newthread && newthread->pt_tls) { 468928f301bSjoerg _rtld_tls_free(newthread->pt_tls); 469928f301bSjoerg newthread->pt_tls = NULL; 470928f301bSjoerg } 471928f301bSjoerg #endif 472a014cf23Sad } 473a014cf23Sad 474b8833ff5Sad /* 475b8833ff5Sad * If necessary set up a stack, allocate space for a pthread_st, 476b8833ff5Sad * and initialize it. 
477b8833ff5Sad */ 4784cdc2ed8Syamt if (newthread == NULL) { 479841339f0Smanu newthread = calloc(1, __pthread_st_size); 480a5644095Sjoerg if (newthread == NULL) { 481b7559f85Schristos free(name); 482a5644095Sjoerg return ENOMEM; 483a5644095Sjoerg } 484d3660af7Sjoerg newthread->pt_stack_allocated = false; 485a5644095Sjoerg 486d3660af7Sjoerg if (pthread__getstack(newthread, attr)) { 487a5644095Sjoerg free(newthread); 488a5644095Sjoerg free(name); 489a5644095Sjoerg return ENOMEM; 490c62a74e6Sthorpej } 491b33971b9Sthorpej 492928f301bSjoerg #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) 493928f301bSjoerg newthread->pt_tls = NULL; 494928f301bSjoerg #endif 495b8833ff5Sad 496b8833ff5Sad /* Add to list of all threads. */ 4979583eeb2Sad pthread_rwlock_wrlock(&pthread__alltree_lock); 498f1b2c1c4Sad PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq); 499d79b47c3Srmind (void)rb_tree_insert_node(&pthread__alltree, newthread); 5009583eeb2Sad pthread_rwlock_unlock(&pthread__alltree_lock); 501b8833ff5Sad 502b8833ff5Sad /* Will be reset by the thread upon exit. */ 503b8833ff5Sad pthread__initthread(newthread); 504d3660af7Sjoerg } else { 505d3660af7Sjoerg if (pthread__getstack(newthread, attr)) { 506d3660af7Sjoerg pthread_mutex_lock(&pthread__deadqueue_lock); 507d3660af7Sjoerg PTQ_INSERT_TAIL(&pthread__deadqueue, newthread, pt_deadq); 508d3660af7Sjoerg pthread_mutex_unlock(&pthread__deadqueue_lock); 509d3660af7Sjoerg return ENOMEM; 510d3660af7Sjoerg } 511ed964af1Sad } 512ed964af1Sad 513b8833ff5Sad /* 514b8833ff5Sad * Create the new LWP. 
515b8833ff5Sad */ 516b8833ff5Sad pthread__scrubthread(newthread, name, nattr.pta_flags); 51761cac435Sad newthread->pt_func = startfunc; 51861cac435Sad newthread->pt_arg = arg; 519aad59997Sjoerg #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) 520aad59997Sjoerg private_area = newthread->pt_tls = _rtld_tls_allocate(); 521aad59997Sjoerg newthread->pt_tls->tcb_pthread = newthread; 522aad59997Sjoerg #else 523aad59997Sjoerg private_area = newthread; 524aad59997Sjoerg #endif 525aad59997Sjoerg 526cd1754abSad flag = 0; 527cbd43ffaSad if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0 || 5280e006eebSad (nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) 529ded26025Sad flag |= LWP_SUSPENDED; 530cd1754abSad if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0) 531cd1754abSad flag |= LWP_DETACHED; 532082d249aSpooka 533082d249aSpooka ret = pthread__makelwp(pthread__create_tramp, newthread, private_area, 534082d249aSpooka newthread->pt_stack.ss_sp, newthread->pt_stack.ss_size, 535082d249aSpooka flag, &newthread->pt_lid); 5361ac6a89bSad if (ret != 0) { 537b0ce37c2Sdrochner ret = errno; 5380dcf29f9Srmind pthread_mutex_lock(&newthread->pt_lock); 5390dcf29f9Srmind /* Will unlock and free name. 
*/ 5400dcf29f9Srmind pthread__reap(newthread); 5411ac6a89bSad return ret; 5421ac6a89bSad } 5431ac6a89bSad 5440e006eebSad if ((nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) { 545cbd43ffaSad if (p != NULL) { 546cbd43ffaSad (void)pthread_setschedparam(newthread, p->ptap_policy, 547cbd43ffaSad &p->ptap_sp); 548cbd43ffaSad } 549cbd43ffaSad if ((newthread->pt_flags & PT_FLAG_SUSPENDED) == 0) { 550cbd43ffaSad (void)_lwp_continue(newthread->pt_lid); 551cbd43ffaSad } 552cbd43ffaSad } 553cbd43ffaSad 554c62a74e6Sthorpej *thread = newthread; 555c62a74e6Sthorpej 556c62a74e6Sthorpej return 0; 557c62a74e6Sthorpej } 558c62a74e6Sthorpej 559c62a74e6Sthorpej 56067f518f4Sjoerg __dead static void 56161cac435Sad pthread__create_tramp(void *cookie) 562c62a74e6Sthorpej { 56361cac435Sad pthread_t self; 564c62a74e6Sthorpej void *retval; 565c62a74e6Sthorpej 56661cac435Sad self = cookie; 56766ac2ffaSad 56850fa8db4Sad /* 56950fa8db4Sad * Throw away some stack in a feeble attempt to reduce cache 57050fa8db4Sad * thrash. May help for SMT processors. XXX We should not 57150fa8db4Sad * be allocating stacks on fixed 2MB boundaries. Needs a 57261cac435Sad * thread register or decent thread local storage. 
57350fa8db4Sad */ 574f63239c2Sad (void)alloca(((unsigned)self->pt_lid & 7) << 8); 575f63239c2Sad 576f63239c2Sad if (self->pt_name != NULL) { 577f63239c2Sad pthread_mutex_lock(&self->pt_lock); 578f63239c2Sad if (self->pt_name != NULL) 57966ac2ffaSad (void)_lwp_setname(0, self->pt_name); 580f63239c2Sad pthread_mutex_unlock(&self->pt_lock); 581f63239c2Sad } 58250fa8db4Sad 583eceac52fSad if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) { 584841339f0Smanu err(EXIT_FAILURE, "_lwp_ctl"); 585eceac52fSad } 58666ac2ffaSad 58761cac435Sad retval = (*self->pt_func)(self->pt_arg); 588c62a74e6Sthorpej 589c62a74e6Sthorpej pthread_exit(retval); 590c62a74e6Sthorpej 591143f5a27Schristos /*NOTREACHED*/ 592143f5a27Schristos pthread__abort(); 593c62a74e6Sthorpej } 594c62a74e6Sthorpej 59538b1c6f4Schristos int 59638b1c6f4Schristos pthread_suspend_np(pthread_t thread) 59738b1c6f4Schristos { 598ba70e96aSchs pthread_t self; 599ba70e96aSchs 60020668e14Skamil pthread__error(EINVAL, "Invalid thread", 60120668e14Skamil thread->pt_magic == PT_MAGIC); 60220668e14Skamil 603ba70e96aSchs self = pthread__self(); 60438b1c6f4Schristos if (self == thread) { 60538b1c6f4Schristos return EDEADLK; 60638b1c6f4Schristos } 607d9adedd7Sad if (pthread__find(thread) != 0) 608ba70e96aSchs return ESRCH; 60940724da2Sad if (_lwp_suspend(thread->pt_lid) == 0) 61040724da2Sad return 0; 61140724da2Sad return errno; 61238b1c6f4Schristos } 61338b1c6f4Schristos 61438b1c6f4Schristos int 61538b1c6f4Schristos pthread_resume_np(pthread_t thread) 61638b1c6f4Schristos { 61738b1c6f4Schristos 61820668e14Skamil pthread__error(EINVAL, "Invalid thread", 61920668e14Skamil thread->pt_magic == PT_MAGIC); 62020668e14Skamil 621d9adedd7Sad if (pthread__find(thread) != 0) 622ba70e96aSchs return ESRCH; 62340724da2Sad if (_lwp_continue(thread->pt_lid) == 0) 62440724da2Sad return 0; 62540724da2Sad return errno; 62638b1c6f4Schristos } 62738b1c6f4Schristos 628047ca71bSad /* 629047ca71bSad * In case the thread is exiting at an inopportune 
time leaving waiters not 630047ca71bSad * awoken (because cancelled, for instance) make sure we have no waiters 631047ca71bSad * left. 632047ca71bSad */ 633047ca71bSad static void 634047ca71bSad pthread__clear_waiters(pthread_t self) 635047ca71bSad { 636047ca71bSad 637047ca71bSad if (self->pt_nwaiters != 0) { 638047ca71bSad (void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters, 639047ca71bSad NULL); 640047ca71bSad self->pt_nwaiters = 0; 641047ca71bSad } 642047ca71bSad self->pt_willpark = 0; 643047ca71bSad } 644047ca71bSad 645c62a74e6Sthorpej void 646c62a74e6Sthorpej pthread_exit(void *retval) 647c62a74e6Sthorpej { 64896b5a26dSnathanw pthread_t self; 649c62a74e6Sthorpej struct pt_clean_t *cleanup; 650c62a74e6Sthorpej 65171d484f9Schristos if (__predict_false(__uselibcstub)) { 65271d484f9Schristos __libc_thr_exit_stub(retval); 65371d484f9Schristos goto out; 65471d484f9Schristos } 65571d484f9Schristos 656c62a74e6Sthorpej self = pthread__self(); 657c62a74e6Sthorpej 658c62a74e6Sthorpej /* Disable cancellability. 
*/ 659f4fd6b79Sad pthread_mutex_lock(&self->pt_lock); 660c62a74e6Sthorpej self->pt_flags |= PT_FLAG_CS_DISABLED; 66166fcc1ceSnathanw self->pt_cancel = 0; 662c62a74e6Sthorpej 663c62a74e6Sthorpej /* Call any cancellation cleanup handlers */ 664989565f8Sad if (!PTQ_EMPTY(&self->pt_cleanup_stack)) { 665989565f8Sad pthread_mutex_unlock(&self->pt_lock); 666c62a74e6Sthorpej while (!PTQ_EMPTY(&self->pt_cleanup_stack)) { 667c62a74e6Sthorpej cleanup = PTQ_FIRST(&self->pt_cleanup_stack); 668c62a74e6Sthorpej PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next); 669c62a74e6Sthorpej (*cleanup->ptc_cleanup)(cleanup->ptc_arg); 670c62a74e6Sthorpej } 671989565f8Sad pthread_mutex_lock(&self->pt_lock); 672989565f8Sad } 673c62a74e6Sthorpej 674e5678be8Sjoerg pthread_mutex_unlock(&self->pt_lock); 675e5678be8Sjoerg __cxa_thread_run_atexit(); 676e5678be8Sjoerg pthread_mutex_lock(&self->pt_lock); 677e5678be8Sjoerg 678c62a74e6Sthorpej /* Perform cleanup of thread-specific data */ 679c62a74e6Sthorpej pthread__destroy_tsd(self); 680c62a74e6Sthorpej 681047ca71bSad /* 682047ca71bSad * Signal our exit. Our stack and pthread_t won't be reused until 683047ca71bSad * pthread_create() can see from kernel info that this LWP is gone. 684047ca71bSad */ 685c62a74e6Sthorpej self->pt_exitval = retval; 6866b2b9c62Syamt if (self->pt_flags & PT_FLAG_DETACHED) { 687cd1754abSad /* pthread__reap() will drop the lock. */ 688cd1754abSad pthread__reap(self); 689047ca71bSad pthread__clear_waiters(self); 6901ac6a89bSad _lwp_exit(); 691c62a74e6Sthorpej } else { 6926b2b9c62Syamt self->pt_state = PT_STATE_ZOMBIE; 693f4fd6b79Sad pthread_mutex_unlock(&self->pt_lock); 694047ca71bSad pthread__clear_waiters(self); 695b33971b9Sthorpej /* Note: name will be freed by the joiner. 
	 */
	_lwp_exit();
}

out:
	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


/*
 * Wait for the target thread to terminate and collect its exit value.
 * The wait is performed with _lwp_wait(); per IEEE Std 1003.1 this
 * function never returns EINTR, so interrupted waits are retried.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;

	pthread__error(EINVAL, "Invalid thread",
	    thread->pt_magic == PT_MAGIC);

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	/* Joining yourself would deadlock waiting for your own exit. */
	if (thread == self)
		return EDEADLK;

	/* IEEE Std 1003.1 says pthread_join() never returns EINTR. */
	for (;;) {
		pthread__testcancel(self);
		if (_lwp_wait(thread->pt_lid, NULL) == 0)
			break;
		if (errno != EINTR)
			return errno;
	}

	/*
	 * Don't test for cancellation again.  The spec is that if
	 * cancelled, pthread_join() must not have succeeded.
	 */
	pthread_mutex_lock(&thread->pt_lock);
	if (thread->pt_state != PT_STATE_ZOMBIE) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "not a zombie");
	}
	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	/* pthread__reap() will drop the lock. */
	pthread__reap(thread);
	return 0;
}

/*
 * Move a zombie thread to the dead queue for later reuse and free its
 * name.  Called with thread->pt_lock held; drops that lock before
 * taking pthread__deadqueue_lock (the two are never held together here).
 */
static void
pthread__reap(pthread_t thread)
{
	char *name;

	name = thread->pt_name;
	thread->pt_name = NULL;
	thread->pt_state = PT_STATE_DEAD;
	pthread_mutex_unlock(&thread->pt_lock);

	pthread_mutex_lock(&pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
	pthread_mutex_unlock(&pthread__deadqueue_lock);

	/* Freed outside the dead queue lock; name was strdup()'d. */
	if (name != NULL)
		free(name);
}

/*
 * Compare two thread handles for identity.  pthread_t is a pointer
 * here, so equality is simple pointer comparison after sanity checks.
 */
int
pthread_equal(pthread_t t1, pthread_t t2)
{

	if (__predict_false(__uselibcstub))
		return __libc_thr_equal_stub(t1, t2);

	pthread__error(0, "Invalid thread",
	    (t1 != NULL) && (t1->pt_magic == PT_MAGIC));

	pthread__error(0, "Invalid thread",
	    (t2 != NULL) && (t2->pt_magic == PT_MAGIC));

	/* Nothing special here. */
	return (t1 == t2);
}


/*
 * Detach a thread so its resources are reclaimed automatically on exit.
 * Returns EINVAL if already detached.  If the target is already a
 * zombie, reap it immediately (pthread__reap() drops pt_lock).
 */
int
pthread_detach(pthread_t thread)
{
	int error;

	pthread__error(EINVAL, "Invalid thread",
	    thread->pt_magic == PT_MAGIC);

	if (pthread__find(thread) != 0)
		return ESRCH;

	pthread_mutex_lock(&thread->pt_lock);
	if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) {
		error = EINVAL;
	} else {
		/* Tell the kernel first; only mark detached on success. */
		error = _lwp_detach(thread->pt_lid);
		if (error == 0)
			thread->pt_flags |= PT_FLAG_DETACHED;
		else
			error = errno;
	}
	if (thread->pt_state == PT_STATE_ZOMBIE) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(thread);
	} else
		pthread_mutex_unlock(&thread->pt_lock);
	return error;
}


/*
 * Copy a thread's name into the caller's buffer (empty string if the
 * thread has no name).  pt_lock guards pt_name against concurrent
 * pthread_setname_np().
 */
int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{

	pthread__error(EINVAL, "Invalid thread",
	    thread->pt_magic == PT_MAGIC);

	if (pthread__find(thread) != 0)
		return ESRCH;

	pthread_mutex_lock(&thread->pt_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}


/*
 * Set a thread's name.  Note: per the NetBSD API, "name" is a printf
 * format string and "arg" its single argument; the formatted result
 * must fit in PTHREAD_MAX_NAMELEN_NP.  The name is also pushed to the
 * kernel via _lwp_setname() so tools like ps/top can see it.
 */
int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	pthread__error(EINVAL, "Invalid thread",
	    thread->pt_magic == PT_MAGIC);

	if (pthread__find(thread) != 0)
		return ESRCH;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_mutex_lock(&thread->pt_lock);
	oldname = thread->pt_name;
	thread->pt_name = cp;
	/* Kernel-side name is best effort; errors are ignored. */
	(void)_lwp_setname(thread->pt_lid, cp);
	pthread_mutex_unlock(&thread->pt_lock);

	/* Free the replaced name outside the lock. */
	if (oldname != NULL)
		free(oldname);

	return 0;
}


/*
 * Return the calling thread's handle.
 */
pthread_t
pthread_self(void)
{
	if (__predict_false(__uselibcstub))
		return (pthread_t)__libc_thr_self_stub();

	return pthread__self();
}


/*
 * Request cancellation of the target thread.  The request is always
 * recorded as pending; if cancellation is currently enabled on the
 * target, also arm pt_cancel and kick the LWP out of any sleep with
 * _lwp_wakeup() so it notices promptly.
 */
int
pthread_cancel(pthread_t thread)
{

	pthread__error(EINVAL, "Invalid thread",
	    thread->pt_magic == PT_MAGIC);

	if (pthread__find(thread) != 0)
		return ESRCH;
	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		/* Wake outside the lock; the target may take pt_lock. */
		pthread_mutex_unlock(&thread->pt_lock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}


/*
 * Enable or disable cancellation for the calling thread, optionally
 * returning the previous state.  Re-enabling with a pending request
 * arms pt_cancel; if the thread is in asynchronous mode, that makes
 * this call itself a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	if (__predict_false(__uselibcstub))
		return __libc_thr_setcancelstate_stub(state, oldstate);

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		/* Park an armed cancel as "pending" until re-enabled. */
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				/* pthread__cancelled() never returns. */
				pthread_mutex_unlock(&self->pt_lock);
				pthread__cancelled();
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}


/*
 * Set the cancellation type (deferred vs. asynchronous) for the
 * calling thread, optionally returning the previous type.  Switching
 * to asynchronous with a cancel already armed acts on it immediately.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			/* pthread__cancelled() never returns. */
			pthread_mutex_unlock(&self->pt_lock);
			pthread__cancelled();
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}


/*
 * Explicit cancellation point: act on a pending cancel, if any.
 */
void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread__cancelled();
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage.
 */
int
pthread__find(pthread_t id)
{
	pthread_t target;
	int error;

	/* Look the handle up in the tree of all known threads. */
	pthread_rwlock_rdlock(&pthread__alltree_lock);
	target = rb_tree_find_node(&pthread__alltree, id);
	error = (target && target->pt_state != PT_STATE_DEAD) ? 0 : ESRCH;
	pthread_rwlock_unlock(&pthread__alltree_lock);

	return error;
}


/*
 * Internal cancellation check for a thread whose handle the caller
 * already holds (avoids a pthread__self() call).
 */
void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread__cancelled();
}


/*
 * Terminate the calling thread due to cancellation; exits with the
 * distinguished value PTHREAD_CANCELED.  Does not return.
 */
void
pthread__cancelled(void)
{

	pthread_exit(PTHREAD_CANCELED);
}


/*
 * Push a cleanup handler onto the calling thread's cleanup stack.
 * "store" is caller-provided storage for the stack entry (the
 * pthread_cleanup_push() macro supplies it), so no allocation occurs.
 */
void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}
/*
 * Pop the most recent cleanup handler from the calling thread's
 * cleanup stack, running it if "ex" is non-zero.
 */
void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}


/*
 * Return the address of the calling thread's errno; used by libc so
 * that errno is per-thread.
 */
int *
pthread__errno(void)
{
	pthread_t self;

	if (__predict_false(__uselibcstub)) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "pthread__errno() requires linking with -lpthread");
		return __libc_thr_errno_stub();
	}

	self = pthread__self();

	return &(self->pt_errno);
}

/* Raw write(2) entry point, bypassing any libc/pthread wrappers. */
ssize_t _sys_write(int, const void *, size_t);

/*
 * Assertion failure handler: format a message without taking locks,
 * write it to stderr, raise SIGABRT on this LWP, and exit.
 */
void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)_lwp_kill(_lwp_self(), SIGABRT);
	_exit(1);
}


/*
 * Report a detected application error.  Behavior is controlled by the
 * pthread__diagassert bitmask (see pthread(3)): write to stderr,
 * syslog, and/or abort.  Silently returns if diagnostics are disabled.
 */
void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)_lwp_kill(_lwp_self(), SIGABRT);
		_exit(1);
	}
}

/*
 * Thread park/unpark operations.  The kernel operations are
 * modelled after a brief description from "Multithreading in
 * the Solaris Operating Environment":
 *
 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
 */

#define	OOPS(msg)			\
	pthread__errorfunc(__FILE__, __LINE__, __func__, msg)

/*
 * Block the calling thread until awoken by an unpark, a signal, or
 * expiry of "abstime" (CLOCK_REALTIME, absolute).  The caller has
 * already queued itself on "queue" under "lock"; the lock is dropped
 * here before sleeping.  If "cancelpt" is set this acts as a
 * cancellation point and returns EINTR when a cancel is pending.
 * Returns 0 on wakeup, ETIMEDOUT on timeout, or another errno.
 */
int
pthread__park(pthread_t self, pthread_mutex_t *lock,
	      pthread_queue_t *queue, const struct timespec *abstime,
	      int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/*
	 * NOTE(review): pt_willpark is set only across the unlock;
	 * presumably it lets waking threads defer the actual
	 * _lwp_unpark — confirm against pthread__mutex_deferwake().
	 */
	self->pt_willpark = 1;
	pthread_mutex_unlock(lock);
	self->pt_willpark = 0;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of pt_sleepobj without
	 * holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	do {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
		    __UNCONST(abstime), self->pt_unpark, hint, hint);
		self->pt_unpark = 0;
		if (error != 0) {
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				/* Spurious or consumed wakeup: retry. */
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleepobj != NULL)) {
		pthread_mutex_lock(lock);
		if ((obj = self->pt_sleepobj) != NULL) {
			PTQ_REMOVE(queue, self, pt_sleep);
			self->pt_sleepobj = NULL;
			/* Run the early-wakeup callback, if registered. */
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread_mutex_unlock(lock);
	}
	self->pt_early = NULL;

	return rv;
}

/*
 * Wake the first thread on "queue".  The LWP id is batched into
 * self->pt_waiters; the actual _lwp_unpark is deferred via
 * pthread__mutex_deferwake(), flushing with _lwp_unpark_all() only
 * when the batch overflows pthread__unpark_max.
 */
void
pthread__unpark(pthread_queue_t *queue, pthread_t self,
		pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	target = PTQ_FIRST(queue);
	if (nwaiters == max) {
		/* Overflow. */
		(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
		    __UNVOLATILE(&interlock->ptm_waiters));
		nwaiters = 0;
	}
	target->pt_sleepobj = NULL;
	self->pt_waiters[nwaiters++] = target->pt_lid;
	PTQ_REMOVE(queue, target, pt_sleep);
	self->pt_nwaiters = nwaiters;
	pthread__mutex_deferwake(self, interlock);
}

/*
 * Wake every thread on "queue", batching LWP ids as in
 * pthread__unpark() and emptying the queue.
 */
void
pthread__unpark_all(pthread_queue_t *queue, pthread_t self,
		    pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	PTQ_FOREACH(target, queue, pt_sleep) {
		if (nwaiters == max) {
			/* Overflow. */
			(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
			    __UNVOLATILE(&interlock->ptm_waiters));
			nwaiters = 0;
		}
		target->pt_sleepobj = NULL;
		self->pt_waiters[nwaiters++] = target->pt_lid;
	}
	self->pt_nwaiters = nwaiters;
	PTQ_INIT(queue);
	pthread__mutex_deferwake(self, interlock);
}

#undef	OOPS

/*
 * Record the main thread's stack extent and guard size: size from
 * RLIMIT_STACK, base from the AT_STACKBASE auxv entry, guard size from
 * the vm.guard_size sysctl (falling back to one page).
 */
static void
pthread__initmainstack(void)
{
	struct rlimit slimit;
	const AuxInfo *aux;
	size_t size, len;
	int mib[2];
	unsigned int value;

	_DIAGASSERT(_dlauxinfo() != NULL);

	if (getrlimit(RLIMIT_STACK, &slimit) == -1)
		err(EXIT_FAILURE,
		    "Couldn't get stack resource consumption limits");
	size = slimit.rlim_cur;
	pthread__main->pt_stack.ss_size = size;
	pthread__main->pt_guardsize = pthread__pagesize;

	mib[0] = CTL_VM;
	mib[1] = VM_GUARD_SIZE;
	len = sizeof(value);
	if (sysctl(mib, __arraycount(mib), &value, &len, NULL, 0) == 0)
		pthread__main->pt_guardsize = value;

	for (aux = _dlauxinfo(); aux->a_type != AT_NULL; ++aux) {
		if (aux->a_type == AT_STACKBASE) {
			/* ss_sp is the low end of the stack region. */
#ifdef __MACHINE_STACK_GROWS_UP
			pthread__main->pt_stack.ss_sp = (void *)aux->a_v;
#else
			pthread__main->pt_stack.ss_sp = (char *)aux->a_v - size;
#endif
			break;
		}
	}
	pthread__copy_tsd(pthread__main);
}

/*
 * Set up the slightly special stack for the "initial" thread, which
 * runs on the normal system stack, and thus gets slightly different
 * treatment.
 */
static void
pthread__initmain(pthread_t *newt)
{
	char *value;

	pthread__initmainstack();

	/* PTHREAD_STACKSIZE env var (in KB) caps the default size. */
	value = pthread__getenv("PTHREAD_STACKSIZE");
	if (value != NULL) {
		pthread__stacksize = atoi(value) * 1024;
		if (pthread__stacksize > pthread__main->pt_stack.ss_size)
			pthread__stacksize = pthread__main->pt_stack.ss_size;
	}
	if (pthread__stacksize == 0)
		pthread__stacksize = pthread__main->pt_stack.ss_size;
	/* Round up to a whole number of pages. */
	pthread__stacksize += pthread__pagesize - 1;
	pthread__stacksize &= ~(pthread__pagesize - 1);
	if (pthread__stacksize < 4 * pthread__pagesize)
		errx(1, "Stacksize limit is too low, minimum %zd kbyte.",
		    4 * pthread__pagesize / 1024);

	*newt = pthread__main;
	/* Locate the TCB for the main thread, platform permitting. */
#if defined(_PTHREAD_GETTCB_EXT)
	pthread__main->pt_tls = _PTHREAD_GETTCB_EXT();
#elif defined(__HAVE___LWP_GETTCB_FAST)
	pthread__main->pt_tls = __lwp_gettcb_fast();
#else
	pthread__main->pt_tls = _lwp_getprivate();
#endif
	pthread__main->pt_tls->tcb_pthread = pthread__main;
}

/*
 * Comparison callback for the rb tree of all threads: orders nodes by
 * their addresses.
 */
static signed int
/*ARGSUSED*/
pthread__cmp(void *ctx, const void *n1, const void *n2)
{
	const uintptr_t p1 = (const uintptr_t)n1;
	const uintptr_t p2 = (const uintptr_t)n2;

	if (p1 < p2)
		return -1;
	if (p1 > p2)
		return 1;
	return 0;
}

/* Because getenv() wants to use locks. */
char *
pthread__getenv(const char *name)
{
	extern char **environ;
	size_t l_name, offset;

	/* Ignore the environment entirely for set-id programs. */
	if (issetugid())
		return (NULL);

	l_name = strlen(name);
	for (offset = 0; environ[offset] != NULL; offset++) {
		if (strncmp(name, environ[offset], l_name) == 0 &&
		    environ[offset][l_name] == '=') {
			return environ[offset] + l_name + 1;
		}
	}

	return NULL;
}

/*
 * Map an arbitrary address to one of the NHASHLOCK hashed mutexes,
 * spreading contention across unrelated objects.
 */
pthread_mutex_t *
pthread__hashlock(volatile const void *p)
{
	uintptr_t v;

	v = (uintptr_t)p;
	return &hashlocks[((v >> 9) ^ (v >> 3)) & (NHASHLOCK - 1)].mutex;
}

/*
 * Validate a scheduling priority against the system's
 * _SC_SCHED_PRI_MIN/_SC_SCHED_PRI_MAX range (cached after first use).
 * Returns 0 if valid, EINVAL otherwise.
 */
int
pthread__checkpri(int pri)
{
	static int havepri;
	static long min, max;

	if (!havepri) {
		min = sysconf(_SC_SCHED_PRI_MIN);
		max = sysconf(_SC_SCHED_PRI_MAX);
		havepri = 1;
	}
	return (pri < min || pri > max) ? EINVAL : 0;
}