/*	$NetBSD: pthread.c,v 1.148 2017/07/02 16:41:32 joerg Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.148 2017/07/02 16:41:32 joerg Exp $");

#define	__EXPOSE_STACK	1

#include <sys/param.h>
#include <sys/exec_elf.h>
#include <sys/mman.h>
#include <sys/lwp.h>
#include <sys/lwpctl.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/tls.h>
#include <uvm/uvm_param.h>

#include <assert.h>
#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sched.h>

#include "pthread.h"
#include "pthread_int.h"
#include "pthread_makelwp.h"
#include "reentrant.h"

pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
static rb_tree_t	pthread__alltree;

static signed int	pthread__cmp(void *, const void *, const void *);

static const rb_tree_ops_t pthread__alltree_ops = {
	.rbto_compare_nodes = pthread__cmp,
	.rbto_compare_key = pthread__cmp,
	.rbto_node_offset = offsetof(struct __pthread_st, pt_alltree),
	.rbto_context = NULL
};

static void	pthread__create_tramp(void *);
static void	pthread__initthread(pthread_t);
static void	pthread__scrubthread(pthread_t, char *, int);
static void	pthread__initmain(pthread_t *);
static void	pthread__fork_callback(void);
static void	pthread__reap(pthread_t);
static void	pthread__child_callback(void);
static void	pthread__start(void);

void	pthread__init(void);

int pthread__started;
int __uselibcstub = 1;
pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_queue_t pthread__deadqueue;
pthread_queue_t pthread__allqueue;

static pthread_attr_t pthread_default_attr;
static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE };

enum {
	DIAGASSERT_ABORT	= 1<<0,
	DIAGASSERT_STDERR	= 1<<1,
	DIAGASSERT_SYSLOG	= 1<<2
};

static int pthread__diagassert;

int pthread__concurrency;
int pthread__nspins;
int pthread__unpark_max = PTHREAD__UNPARK_MAX;
int pthread__dbg;	/* set by libpthread_dbg if active */

/*
 * We have to initialize the pthread_stack* variables here because
 * mutexes are used before pthread_init() and thus pthread__initmain()
 * are called.  Since mutexes only save the stack pointer and not a
 * pointer to the thread data, it is safe to change the mapping from
 * stack pointer to thread data afterwards.
 */
size_t	pthread__stacksize;
size_t	pthread__guardsize;
size_t	pthread__pagesize;
static struct __pthread_st *pthread__main;
static size_t __pthread_st_size;

int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
__strong_alias(__libc_thr_equal,pthread_equal)
__strong_alias(__libc_thr_init,pthread__init)

/*
 * Static library kludge.  Place a reference to a symbol in any library
 * file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
};

#define	NHASHLOCK	64

static union hashlock {
	pthread_mutex_t	mutex;
	char		pad[64];
} hashlocks[NHASHLOCK] __aligned(64);

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes are an internal example).
 */
void
pthread__init(void)
{
	pthread_t first;
	char *p;
	int i;
	int mib[2];
	unsigned int value;
	size_t len;
	extern int __isthreaded;

	/*
	 * Allocate pthread_keys descriptors before
	 * resetting __uselibcstub because otherwise
	 * malloc() will call pthread_key_create()
	 * while pthread_keys descriptors are not
	 * yet allocated.
	 */
	pthread__main = pthread_tsd_init(&__pthread_st_size);
	if (pthread__main == NULL)
		err(EXIT_FAILURE, "Cannot allocate pthread storage");

	__uselibcstub = 0;

	pthread__pagesize = (size_t)sysconf(_SC_PAGESIZE);
	pthread__concurrency = (int)sysconf(_SC_NPROCESSORS_CONF);

	mib[0] = CTL_VM;
	mib[1] = VM_THREAD_GUARD_SIZE;
	len = sizeof(value);
	if (sysctl(mib, __arraycount(mib), &value, &len, NULL, 0) == 0)
		pthread__guardsize = value;
	else
		pthread__guardsize = pthread__pagesize;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();
	for (i = 0; i < NHASHLOCK; i++) {
		pthread_mutex_init(&hashlocks[i].mutex, NULL);
	}

	/* Fetch parameters. */
	i = (int)_lwp_unpark_all(NULL, 0, NULL);
	if (i == -1)
		err(EXIT_FAILURE, "_lwp_unpark_all");
	if (i < pthread__unpark_max)
		pthread__unpark_max = i;

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);

	rb_tree_init(&pthread__alltree, &pthread__alltree_ops);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first);
	pthread__scrubthread(first, NULL, 0);

	first->pt_lid = _lwp_self();
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
	(void)rb_tree_insert_node(&pthread__alltree, first);

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) {
		err(EXIT_FAILURE, "_lwp_ctl");
	}

	/* Start subsystems */
	PTHREAD_MD_INIT

	for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	pthread_atfork(NULL, NULL, pthread__fork_callback);
	__isthreaded = 1;
}

static void
pthread__fork_callback(void)
{
	struct __pthread_st *self = pthread__self();

	/* lwpctl state is not copied across fork. */
	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
		err(EXIT_FAILURE, "_lwp_ctl");
	}
	self->pt_lid = _lwp_self();
}

static void
pthread__child_callback(void)
{

	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread_atfork(NULL, NULL, pthread__child_callback);
}


/* General-purpose thread data structure sanitization. */
/* ARGSUSED */
static void
pthread__initthread(pthread_t t)
{

	t->pt_self = t;
	t->pt_magic = PT_MAGIC;
	t->pt_willpark = 0;
	t->pt_unpark = 0;
	t->pt_nwaiters = 0;
	t->pt_sleepobj = NULL;
	t->pt_signalled = 0;
	t->pt_havespecific = 0;
	t->pt_early = NULL;
	t->pt_lwpctl = &pthread__dummy_lwpctl;
	t->pt_blocking = 0;
	t->pt_droplock = NULL;

	memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
	pthread_mutex_init(&t->pt_lock, NULL);
	PTQ_INIT(&t->pt_cleanup_stack);
	pthread_cond_init(&t->pt_joiners, NULL);
}

static void
pthread__scrubthread(pthread_t t, char *name, int flags)
{

	t->pt_state = PT_STATE_RUNNING;
	t->pt_exitval = NULL;
	t->pt_flags = flags;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_name = name;
	t->pt_lid = 0;
}

static int
pthread__getstack(pthread_t newthread, const pthread_attr_t *attr)
{
	void *stackbase, *stackbase2, *redzone;
	size_t stacksize, guardsize;
	bool allocated;

	if (attr != NULL) {
		pthread_attr_getstack(attr, &stackbase, &stacksize);
		pthread_attr_getguardsize(attr, &guardsize);
	} else {
		stackbase = NULL;
		stacksize = 0;
		guardsize = pthread__guardsize;
	}
	if (stacksize == 0)
		stacksize = pthread__stacksize;

	if (newthread->pt_stack_allocated) {
		if (stackbase == NULL &&
		    newthread->pt_stack.ss_size == stacksize &&
		    newthread->pt_guardsize == guardsize)
			return 0;
		stackbase2 = newthread->pt_stack.ss_sp;
#ifndef __MACHINE_STACK_GROWS_UP
		stackbase2 = (char *)stackbase2 - newthread->pt_guardsize;
#endif
		munmap(stackbase2,
		    newthread->pt_stack.ss_size + newthread->pt_guardsize);
		newthread->pt_stack.ss_sp = NULL;
		newthread->pt_stack.ss_size = 0;
		newthread->pt_guardsize = 0;
		newthread->pt_stack_allocated = false;
	}

	newthread->pt_stack_allocated = false;

	if (stackbase == NULL) {
		stacksize = ((stacksize - 1) | (pthread__pagesize - 1)) + 1;
		guardsize = ((guardsize - 1) | (pthread__pagesize - 1)) + 1;
		stackbase = mmap(NULL, stacksize + guardsize,
		    PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, (off_t)0);
		if (stackbase == MAP_FAILED)
			return ENOMEM;
		allocated = true;
	} else {
		allocated = false;
	}
#ifdef __MACHINE_STACK_GROWS_UP
	redzone = (char *)stackbase + stacksize;
	stackbase2 = (char *)stackbase;
#else
	redzone = (char *)stackbase;
	stackbase2 = (char *)stackbase + guardsize;
#endif
	if (allocated && guardsize &&
	    mprotect(redzone, guardsize, PROT_NONE) == -1) {
		munmap(stackbase, stacksize + guardsize);
		return EPERM;
	}
	newthread->pt_stack.ss_size = stacksize;
	newthread->pt_stack.ss_sp = stackbase2;
	newthread->pt_guardsize = guardsize;
	newthread->pt_stack_allocated = allocated;
	return 0;
}
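
/*
 * Illustrative sketch (not part of this file): how an application can hand
 * pthread__getstack() a caller-supplied stack through the attribute
 * interface instead of letting the library mmap() one.  The 1MB size below
 * is an arbitrary assumption for the sketch; real code should size the
 * stack for its own needs.
 */
#if 0
#include <sys/mman.h>
#include <errno.h>
#include <pthread.h>

static void *
worker(void *arg)
{
	return arg;
}

static int
spawn_with_own_stack(pthread_t *tp)
{
	static const size_t stacksize = 1024 * 1024;	/* arbitrary pick */
	pthread_attr_t attr;
	void *stack;
	int error;

	/* Page-aligned anonymous memory, much like the library's own path. */
	stack = mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE, -1, (off_t)0);
	if (stack == MAP_FAILED)
		return ENOMEM;

	pthread_attr_init(&attr);
	/* pthread__getstack() will use this region as-is and not mmap(). */
	error = pthread_attr_setstack(&attr, stack, stacksize);
	if (error == 0)
		error = pthread_create(tp, &attr, worker, NULL);
	pthread_attr_destroy(&attr);
	return error;
}
#endif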
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * volatile name;
	unsigned long flag;
	void *private_area;
	int ret;

	if (__predict_false(__uselibcstub)) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "pthread_create() requires linking with -lpthread");
		return __libc_thr_create_stub(thread, attr, startfunc, arg);
	}

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	newthread = NULL;

	/*
	 * Try to reclaim a dead thread.
	 */
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_FOREACH(newthread, &pthread__deadqueue, pt_deadq) {
			/* Still running? */
			if (newthread->pt_lwpctl->lc_curcpu ==
			    LWPCTL_CPU_EXITED ||
			    (_lwp_kill(newthread->pt_lid, 0) == -1 &&
			    errno == ESRCH))
				break;
		}
		if (newthread)
			PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
		if (newthread && newthread->pt_tls) {
			_rtld_tls_free(newthread->pt_tls);
			newthread->pt_tls = NULL;
		}
#endif
	}

	/*
	 * If necessary set up a stack, allocate space for a pthread_st,
	 * and initialize it.
	 */
	if (newthread == NULL) {
		newthread = calloc(1, __pthread_st_size);
		if (newthread == NULL) {
			free(name);
			return ENOMEM;
		}
		newthread->pt_stack_allocated = false;

		if (pthread__getstack(newthread, attr)) {
			free(newthread);
			free(name);
			return ENOMEM;
		}

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
		newthread->pt_tls = NULL;
#endif

		/* Add to list of all threads. */
		pthread_rwlock_wrlock(&pthread__alltree_lock);
		PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq);
		(void)rb_tree_insert_node(&pthread__alltree, newthread);
		pthread_rwlock_unlock(&pthread__alltree_lock);

		/* Will be reset by the thread upon exit. */
		pthread__initthread(newthread);
	} else {
		if (pthread__getstack(newthread, attr)) {
			pthread_mutex_lock(&pthread__deadqueue_lock);
			PTQ_INSERT_TAIL(&pthread__deadqueue, newthread, pt_deadq);
			pthread_mutex_unlock(&pthread__deadqueue_lock);
			return ENOMEM;
		}
	}

	/*
	 * Create the new LWP.
	 */
	pthread__scrubthread(newthread, name, nattr.pta_flags);
	newthread->pt_func = startfunc;
	newthread->pt_arg = arg;
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
	private_area = newthread->pt_tls = _rtld_tls_allocate();
	newthread->pt_tls->tcb_pthread = newthread;
#else
	private_area = newthread;
#endif

	flag = LWP_DETACHED;
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0 ||
	    (nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0)
		flag |= LWP_SUSPENDED;

	ret = pthread__makelwp(pthread__create_tramp, newthread, private_area,
	    newthread->pt_stack.ss_sp, newthread->pt_stack.ss_size,
	    flag, &newthread->pt_lid);
	if (ret != 0) {
		ret = errno;
		pthread_mutex_lock(&newthread->pt_lock);
		/* Will unlock and free name. */
		pthread__reap(newthread);
		return ret;
	}

	if ((nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) {
		if (p != NULL) {
			(void)pthread_setschedparam(newthread, p->ptap_policy,
			    &p->ptap_sp);
		}
		if ((newthread->pt_flags & PT_FLAG_SUSPENDED) == 0) {
			(void)_lwp_continue(newthread->pt_lid);
		}
	}

	*thread = newthread;

	return 0;
}
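
/*
 * Illustrative sketch (not part of this library): the usual
 * pthread_create()/pthread_join() life cycle implemented by the code above
 * and by pthread_join() below.
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int the_answer = 42;

static void *
hello(void *arg)
{
	(void)arg;
	printf("hello from a new thread\n");
	return &the_answer;		/* visible to pthread_join() below */
}

int
main(void)
{
	pthread_t t;
	void *result;

	if (pthread_create(&t, NULL, hello, NULL) != 0)
		abort();
	if (pthread_join(t, &result) != 0)	/* waits for and reaps the zombie */
		abort();
	printf("joined, result=%d\n", *(int *)result);
	return 0;
}
#endif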

__dead static void
pthread__create_tramp(void *cookie)
{
	pthread_t self;
	void *retval;

	self = cookie;

	/*
	 * Throw away some stack in a feeble attempt to reduce cache
	 * thrash.  May help for SMT processors.  XXX We should not
	 * be allocating stacks on fixed 2MB boundaries.  Needs a
	 * thread register or decent thread local storage.
	 *
	 * Note that we may race with the kernel in _lwp_create(),
	 * and so pt_lid can be unset at this point, but we don't
	 * care.
	 */
	(void)alloca(((unsigned)self->pt_lid & 7) << 8);

	if (self->pt_name != NULL) {
		pthread_mutex_lock(&self->pt_lock);
		if (self->pt_name != NULL)
			(void)_lwp_setname(0, self->pt_name);
		pthread_mutex_unlock(&self->pt_lock);
	}

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
		err(EXIT_FAILURE, "_lwp_ctl");
	}

	retval = (*self->pt_func)(self->pt_arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
	if (pthread__find(thread) != 0)
		return ESRCH;
	if (_lwp_suspend(thread->pt_lid) == 0)
		return 0;
	return errno;
}

int
pthread_resume_np(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	if (_lwp_continue(thread->pt_lid) == 0)
		return 0;
	return errno;
}
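
/*
 * Illustrative sketch (not part of this library): pausing and resuming
 * another thread with the non-portable calls above.  The target is stopped
 * at an arbitrary point, so it must not hold locks the caller needs while
 * suspended.  The worker below is deliberately trivial and is left running
 * at the end for brevity.
 */
#if 0
#include <pthread.h>
#include <unistd.h>

static void *
busy(void *arg)
{
	unsigned long *counter = arg;

	for (;;)
		(*counter)++;
	/* NOTREACHED */
	return NULL;
}

static void
pause_briefly(void)
{
	static unsigned long counter;
	pthread_t t;

	pthread_create(&t, NULL, busy, &counter);
	sleep(1);
	pthread_suspend_np(t);		/* the worker stops advancing counter */
	sleep(1);
	pthread_resume_np(t);		/* and picks up where it was stopped */
}
#endif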
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;

	if (__predict_false(__uselibcstub)) {
		__libc_thr_exit_stub(retval);
		goto out;
	}

	self = pthread__self();

	/* Disable cancellability. */
	pthread_mutex_lock(&self->pt_lock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/* Call any cancellation cleanup handlers */
	if (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		pthread_mutex_unlock(&self->pt_lock);
		while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
			cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
			PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
			(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
		}
		pthread_mutex_lock(&self->pt_lock);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	/* Signal our exit. */
	self->pt_exitval = retval;
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_mutex_unlock(&self->pt_lock);
		if (name != NULL)
			free(name);
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		_lwp_exit();
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_cond_broadcast(&self->pt_joiners);
		pthread_mutex_unlock(&self->pt_lock);
		/* Note: name will be freed by the joiner. */
		_lwp_exit();
	}

out:
	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	int error;

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	self->pt_droplock = &thread->pt_lock;
	pthread_mutex_lock(&thread->pt_lock);
	for (;;) {
		if (thread->pt_state == PT_STATE_ZOMBIE)
			break;
		if (thread->pt_state == PT_STATE_DEAD) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return ESRCH;
		}
		if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return EINVAL;
		}
		error = pthread_cond_wait(&thread->pt_joiners,
		    &thread->pt_lock);
		if (error != 0) {
			pthread__errorfunc(__FILE__, __LINE__,
			    __func__, "unexpected return from cond_wait()");
		}

	}
	pthread__testcancel(self);
	if (valptr != NULL)
		*valptr = thread->pt_exitval;
	/* pthread__reap() will drop the lock. */
	pthread__reap(thread);
	self->pt_droplock = NULL;

	return 0;
}

static void
pthread__reap(pthread_t thread)
{
	char *name;

	name = thread->pt_name;
	thread->pt_name = NULL;
	thread->pt_state = PT_STATE_DEAD;
	pthread_mutex_unlock(&thread->pt_lock);

	pthread_mutex_lock(&pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
	pthread_mutex_unlock(&pthread__deadqueue_lock);

	if (name != NULL)
		free(name);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	if (__predict_false(__uselibcstub))
		return __libc_thr_equal_stub(t1, t2);

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_DETACHED;
	if (thread->pt_state == PT_STATE_ZOMBIE) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(thread);
	} else {
		/*
		 * Not valid for threads to be waiting in
		 * pthread_join() (there are intractable
		 * sync issues from the application
		 * perspective), but give those threads
		 * a chance anyway.
		 */
		pthread_cond_broadcast(&thread->pt_joiners);
		pthread_mutex_unlock(&thread->pt_lock);
	}

	return 0;
}
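
/*
 * Illustrative sketch (not part of this library): a detached worker.  A
 * detached thread is never joined; on exit it goes straight to the dead
 * queue above and its resources are recycled by a later pthread_create().
 */
#if 0
#include <pthread.h>

static void *
background_task(void *arg)
{
	(void)arg;
	/* ... do some work ... */
	return NULL;
}

static int
fire_and_forget(void)
{
	pthread_attr_t attr;
	pthread_t t;
	int error;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	error = pthread_create(&t, &attr, background_task, NULL);
	pthread_attr_destroy(&attr);
	return error;
}
#endif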

int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}


int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_mutex_lock(&thread->pt_lock);
	oldname = thread->pt_name;
	thread->pt_name = cp;
	(void)_lwp_setname(thread->pt_lid, cp);
	pthread_mutex_unlock(&thread->pt_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
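
/*
 * Illustrative sketch (not part of this library): as the snprintf() call
 * above shows, NetBSD's pthread_setname_np() treats the name as a format
 * string with one argument.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static void
label_worker(pthread_t t, int idx)
{
	char tag[8], buf[PTHREAD_MAX_NAMELEN_NP];

	snprintf(tag, sizeof(tag), "%d", idx);
	/* The name is a format string; the third argument feeds it. */
	pthread_setname_np(t, "worker/%s", tag);

	pthread_getname_np(t, buf, sizeof(buf));	/* e.g. "worker/3" */
}
#endif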

/*
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{
	if (__predict_false(__uselibcstub))
		return (pthread_t)__libc_thr_self_stub();

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_mutex_unlock(&thread->pt_lock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}


int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	if (__predict_false(__uselibcstub))
		return __libc_thr_setcancelstate_stub(state, oldstate);

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_mutex_unlock(&self->pt_lock);
				pthread__cancelled();
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}

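/*
 * Illustrative sketch (not part of this library): shielding a non-reentrant
 * critical section from deferred cancellation, then letting any request
 * that arrived in the meantime take effect.
 */
#if 0
#include <pthread.h>

static void
critical_update(void)
{
	int oldstate;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... update shared state that must not be torn ... */
	pthread_setcancelstate(oldstate, NULL);

	/*
	 * A request made while we were disabled is remembered as pending
	 * (see PT_FLAG_CS_PENDING above) and fires here.
	 */
	pthread_testcancel();
}
#endif
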
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_mutex_unlock(&self->pt_lock);
			pthread__cancelled();
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread__cancelled();
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage.
 */
int
pthread__find(pthread_t id)
{
	pthread_t target;
	int error;

	pthread_rwlock_rdlock(&pthread__alltree_lock);
	target = rb_tree_find_node(&pthread__alltree, id);
	error = (target && target->pt_state != PT_STATE_DEAD) ? 0 : ESRCH;
	pthread_rwlock_unlock(&pthread__alltree_lock);

	return error;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread__cancelled();
}


void
pthread__cancelled(void)
{
	pthread_mutex_t *droplock;
	pthread_t self;

	self = pthread__self();
	droplock = self->pt_droplock;
	self->pt_droplock = NULL;

	if (droplock != NULL && pthread_mutex_held_np(droplock))
		pthread_mutex_unlock(droplock);

	pthread_exit(PTHREAD_CANCELED);
}


void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
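
/*
 * Illustrative sketch (not part of this library): the public
 * pthread_cleanup_push()/pthread_cleanup_pop() macros are thin wrappers
 * around the two helpers above, keeping the handler record in a
 * stack-allocated slot ("store") owned by the enclosing block.
 */
#if 0
#include <pthread.h>

static void
unlock_on_cancel(void *arg)
{
	pthread_mutex_unlock(arg);
}

static void
guarded_wait(pthread_mutex_t *mtx, pthread_cond_t *cv, int *ready)
{
	pthread_mutex_lock(mtx);
	pthread_cleanup_push(unlock_on_cancel, mtx);
	while (!*ready)
		pthread_cond_wait(cv, mtx);	/* a cancellation point */
	pthread_cleanup_pop(1);			/* run the handler: unlock */
}
#endif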

int *
pthread__errno(void)
{
	pthread_t self;

	if (__predict_false(__uselibcstub)) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "pthread__errno() requires linking with -lpthread");
		return __libc_thr_errno_stub();
	}

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);

void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}

/*
 * Thread park/unpark operations.  The kernel operations are
 * modelled after a brief description from "Multithreading in
 * the Solaris Operating Environment":
 *
 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
 */

#define	OOPS(msg)			\
	pthread__errorfunc(__FILE__, __LINE__, __func__, msg)

int
pthread__park(pthread_t self, pthread_mutex_t *lock,
	      pthread_queue_t *queue, const struct timespec *abstime,
	      int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/*
	 * For non-interlocked release of mutexes we need a store
	 * barrier before incrementing pt_blocking away from zero.
	 * This is provided by pthread_mutex_unlock().
	 */
	self->pt_willpark = 1;
	pthread_mutex_unlock(lock);
	self->pt_willpark = 0;
	self->pt_blocking++;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of pt_sleepobj without
	 * holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	do {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME, abstime,
		    self->pt_unpark, hint, hint);
		self->pt_unpark = 0;
		if (error != 0) {
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleepobj != NULL)) {
		pthread_mutex_lock(lock);
		if ((obj = self->pt_sleepobj) != NULL) {
			PTQ_REMOVE(queue, self, pt_sleep);
			self->pt_sleepobj = NULL;
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread_mutex_unlock(lock);
	}
	self->pt_early = NULL;
	self->pt_blocking--;
	membar_sync();

	return rv;
}

void
pthread__unpark(pthread_queue_t *queue, pthread_t self,
		pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	target = PTQ_FIRST(queue);
	if (nwaiters == max) {
		/* Overflow. */
		(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
		    __UNVOLATILE(&interlock->ptm_waiters));
		nwaiters = 0;
	}
	target->pt_sleepobj = NULL;
	self->pt_waiters[nwaiters++] = target->pt_lid;
	PTQ_REMOVE(queue, target, pt_sleep);
	self->pt_nwaiters = nwaiters;
	pthread__mutex_deferwake(self, interlock);
}

void
pthread__unpark_all(pthread_queue_t *queue, pthread_t self,
		    pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	PTQ_FOREACH(target, queue, pt_sleep) {
		if (nwaiters == max) {
			/* Overflow. */
			(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
			    __UNVOLATILE(&interlock->ptm_waiters));
			nwaiters = 0;
		}
		target->pt_sleepobj = NULL;
		self->pt_waiters[nwaiters++] = target->pt_lid;
	}
	self->pt_nwaiters = nwaiters;
	PTQ_INIT(queue);
	pthread__mutex_deferwake(self, interlock);
}

#undef	OOPS
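
/*
 * Illustrative sketch (not part of this library): the sleep/wakeup pattern
 * that pthread__park()/pthread__unpark() implement underneath, shown with
 * the public interfaces built on them (the condition-variable code is one
 * consumer of these primitives).
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;

static void
consumer(void)
{
	pthread_mutex_lock(&lock);
	while (!ready)				/* re-check: wakeups may be spurious */
		pthread_cond_wait(&cond, &lock);	/* parks this thread */
	pthread_mutex_unlock(&lock);
}

static void
producer(void)
{
	pthread_mutex_lock(&lock);
	ready = true;
	pthread_cond_signal(&cond);		/* unparks one waiter */
	pthread_mutex_unlock(&lock);
}
#endif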

static void
pthread__initmainstack(void)
{
	struct rlimit slimit;
	const AuxInfo *aux;
	size_t size, len;
	int mib[2];
	unsigned int value;

	_DIAGASSERT(_dlauxinfo() != NULL);

	if (getrlimit(RLIMIT_STACK, &slimit) == -1)
		err(EXIT_FAILURE,
		    "Couldn't get stack resource consumption limits");
	size = slimit.rlim_cur;
	pthread__main->pt_stack.ss_size = size;
	pthread__main->pt_guardsize = pthread__pagesize;

	mib[0] = CTL_VM;
	mib[1] = VM_GUARD_SIZE;
	len = sizeof(value);
	if (sysctl(mib, __arraycount(mib), &value, &len, NULL, 0) == 0)
		pthread__main->pt_guardsize = value;

	for (aux = _dlauxinfo(); aux->a_type != AT_NULL; ++aux) {
		if (aux->a_type == AT_STACKBASE) {
			pthread__main->pt_stack.ss_sp = (void *)aux->a_v;
#ifdef __MACHINE_STACK_GROWS_UP
			pthread__main->pt_stack.ss_sp = (void *)aux->a_v;
#else
			pthread__main->pt_stack.ss_sp = (char *)aux->a_v - size;
#endif
			break;
		}
	}
}

/*
 * Set up the slightly special stack for the "initial" thread, which
 * runs on the normal system stack, and thus gets slightly different
 * treatment.
 */
static void
pthread__initmain(pthread_t *newt)
{
	char *value;

	pthread__initmainstack();

	value = pthread__getenv("PTHREAD_STACKSIZE");
	if (value != NULL) {
		pthread__stacksize = atoi(value) * 1024;
		if (pthread__stacksize > pthread__main->pt_stack.ss_size)
			pthread__stacksize = pthread__main->pt_stack.ss_size;
	}
	if (pthread__stacksize == 0)
		pthread__stacksize = pthread__main->pt_stack.ss_size;
	pthread__stacksize += pthread__pagesize - 1;
	pthread__stacksize &= ~(pthread__pagesize - 1);
	if (pthread__stacksize < 4 * pthread__pagesize)
		errx(1, "Stacksize limit is too low, minimum %zu kbyte.",
		    4 * pthread__pagesize / 1024);

	*newt = pthread__main;
#if defined(_PTHREAD_GETTCB_EXT)
	pthread__main->pt_tls = _PTHREAD_GETTCB_EXT();
#elif defined(__HAVE___LWP_GETTCB_FAST)
	pthread__main->pt_tls = __lwp_gettcb_fast();
#else
	pthread__main->pt_tls = _lwp_getprivate();
#endif
	pthread__main->pt_tls->tcb_pthread = pthread__main;
}

static signed int
/*ARGSUSED*/
pthread__cmp(void *ctx, const void *n1, const void *n2)
{
	const uintptr_t p1 = (const uintptr_t)n1;
	const uintptr_t p2 = (const uintptr_t)n2;

	if (p1 < p2)
		return -1;
	if (p1 > p2)
		return 1;
	return 0;
}

/* Because getenv() wants to use locks. */
char *
pthread__getenv(const char *name)
{
	extern char **environ;
	size_t l_name, offset;

	l_name = strlen(name);
	for (offset = 0; environ[offset] != NULL; offset++) {
		if (strncmp(name, environ[offset], l_name) == 0 &&
		    environ[offset][l_name] == '=') {
			return environ[offset] + l_name + 1;
		}
	}

	return NULL;
}

pthread_mutex_t *
pthread__hashlock(volatile const void *p)
{
	uintptr_t v;

	v = (uintptr_t)p;
	return &hashlocks[((v >> 9) ^ (v >> 3)) & (NHASHLOCK - 1)].mutex;
}

int
pthread__checkpri(int pri)
{
	static int havepri;
	static long min, max;

	if (!havepri) {
		min = sysconf(_SC_SCHED_PRI_MIN);
		max = sysconf(_SC_SCHED_PRI_MAX);
		havepri = 1;
	}
	return (pri < min || pri > max) ? EINVAL : 0;
}
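
/*
 * Illustrative sketch (not part of this library): pthread__checkpri() above
 * caches the scheduler's priority range; a caller of the scheduling
 * interfaces can stay within that range like this.
 */
#if 0
#include <pthread.h>
#include <sched.h>

static int
boost_thread(pthread_t t)
{
	struct sched_param sp;
	int policy, error;

	error = pthread_getschedparam(t, &policy, &sp);
	if (error != 0)
		return error;

	sp.sched_priority = sched_get_priority_max(policy);
	return pthread_setschedparam(t, policy, &sp);	/* EINVAL if out of range */
}
#endif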