xref: /netbsd-src/lib/libpthread/pthread.c (revision 2bcb8bf1c4a2927911cb14c03d6258adc68b0c08)
1*2bcb8bf1Sad /*	$NetBSD: pthread.c,v 1.101 2008/05/25 17:05:28 ad Exp $	*/
2c62a74e6Sthorpej 
3c62a74e6Sthorpej /*-
4eceac52fSad  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5c62a74e6Sthorpej  * All rights reserved.
6c62a74e6Sthorpej  *
7c62a74e6Sthorpej  * This code is derived from software contributed to The NetBSD Foundation
81ac6a89bSad  * by Nathan J. Williams and Andrew Doran.
9c62a74e6Sthorpej  *
10c62a74e6Sthorpej  * Redistribution and use in source and binary forms, with or without
11c62a74e6Sthorpej  * modification, are permitted provided that the following conditions
12c62a74e6Sthorpej  * are met:
13c62a74e6Sthorpej  * 1. Redistributions of source code must retain the above copyright
14c62a74e6Sthorpej  *    notice, this list of conditions and the following disclaimer.
15c62a74e6Sthorpej  * 2. Redistributions in binary form must reproduce the above copyright
16c62a74e6Sthorpej  *    notice, this list of conditions and the following disclaimer in the
17c62a74e6Sthorpej  *    documentation and/or other materials provided with the distribution.
18c62a74e6Sthorpej  *
19c62a74e6Sthorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20c62a74e6Sthorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21c62a74e6Sthorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22c62a74e6Sthorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23c62a74e6Sthorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24c62a74e6Sthorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25c62a74e6Sthorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26c62a74e6Sthorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27c62a74e6Sthorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28c62a74e6Sthorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29c62a74e6Sthorpej  * POSSIBILITY OF SUCH DAMAGE.
30c62a74e6Sthorpej  */
31c62a74e6Sthorpej 
32f043c0fbSlukem #include <sys/cdefs.h>
33*2bcb8bf1Sad __RCSID("$NetBSD: pthread.c,v 1.101 2008/05/25 17:05:28 ad Exp $");
349e287199Sad 
359e287199Sad #define	__EXPOSE_STACK	1
369e287199Sad 
379e287199Sad #include <sys/param.h>
389e287199Sad #include <sys/mman.h>
399e287199Sad #include <sys/sysctl.h>
4066ac2ffaSad #include <sys/lwpctl.h>
41f043c0fbSlukem 
42c62a74e6Sthorpej #include <err.h>
43c62a74e6Sthorpej #include <errno.h>
44c62a74e6Sthorpej #include <lwp.h>
45c62a74e6Sthorpej #include <signal.h>
468bcff70bSnathanw #include <stdio.h>
47c62a74e6Sthorpej #include <stdlib.h>
48c62a74e6Sthorpej #include <string.h>
490172694eSnathanw #include <syslog.h>
50c62a74e6Sthorpej #include <ucontext.h>
518bcff70bSnathanw #include <unistd.h>
52c62a74e6Sthorpej #include <sched.h>
539e287199Sad 
54c62a74e6Sthorpej #include "pthread.h"
55c62a74e6Sthorpej #include "pthread_int.h"
56c62a74e6Sthorpej 
579583eeb2Sad pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
589583eeb2Sad RB_HEAD(__pthread__alltree, __pthread_st) pthread__alltree;
599583eeb2Sad 
609583eeb2Sad #ifndef lint
619583eeb2Sad static int	pthread__cmp(struct __pthread_st *, struct __pthread_st *);
629583eeb2Sad RB_PROTOTYPE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
639583eeb2Sad #endif
649583eeb2Sad 
6566ac2ffaSad static void	pthread__create_tramp(pthread_t, void *(*)(void *), void *);
6650fa8db4Sad static void	pthread__initthread(pthread_t);
67b8833ff5Sad static void	pthread__scrubthread(pthread_t, char *, int);
689e287199Sad static int	pthread__stackid_setup(void *, size_t, pthread_t *);
699e287199Sad static int	pthread__stackalloc(pthread_t *);
709e287199Sad static void	pthread__initmain(pthread_t *);
7166ac2ffaSad static void	pthread__fork_callback(void);
72989565f8Sad static void	pthread__reap(pthread_t);
73c6409540Schristos static void	pthread__child_callback(void);
74c6409540Schristos static void	pthread__start(void);
75c62a74e6Sthorpej 
7615e9cec1Sad void	pthread__init(void);
7715e9cec1Sad 
78c62a74e6Sthorpej int pthread__started;
79f4fd6b79Sad pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
8050fa8db4Sad pthread_queue_t pthread__deadqueue;
81f1b2c1c4Sad pthread_queue_t pthread__allqueue;
82c62a74e6Sthorpej 
83c62a74e6Sthorpej static pthread_attr_t pthread_default_attr;
8466ac2ffaSad static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE };
8566ac2ffaSad static pthread_t pthread__first;
86c62a74e6Sthorpej 
870172694eSnathanw enum {
880172694eSnathanw 	DIAGASSERT_ABORT =	1<<0,
890172694eSnathanw 	DIAGASSERT_STDERR =	1<<1,
900172694eSnathanw 	DIAGASSERT_SYSLOG =	1<<2
910172694eSnathanw };
92df277271Snathanw 
930172694eSnathanw static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;
94df277271Snathanw 
95c94f5a91Sad int pthread__concurrency;
96c94f5a91Sad int pthread__nspins;
97ded26025Sad int pthread__unpark_max = PTHREAD__UNPARK_MAX;
981d42dbd2Sad int pthread__osrev;
99f2f10664Scl 
1009e287199Sad /*
1019e287199Sad  * We have to initialize the pthread_stack* variables here because
1029e287199Sad  * mutexes are used before pthread_init() and thus pthread__initmain()
1039e287199Sad  * are called.  Since mutexes only save the stack pointer and not a
1049e287199Sad  * pointer to the thread data, it is safe to change the mapping from
1059e287199Sad  * stack pointer to thread data afterwards.
1069e287199Sad  */
1079e287199Sad #define	_STACKSIZE_LG 18
1089e287199Sad int	pthread__stacksize_lg = _STACKSIZE_LG;
1099e287199Sad size_t	pthread__stacksize = 1 << _STACKSIZE_LG;
1109e287199Sad vaddr_t	pthread__stackmask = (1 << _STACKSIZE_LG) - 1;
1119583eeb2Sad vaddr_t pthread__threadmask = (vaddr_t)~((1 << _STACKSIZE_LG) - 1);
1129e287199Sad #undef	_STACKSIZE_LG
1139e287199Sad 
114f782e995Sdrochner int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);
115f782e995Sdrochner 
116c62a74e6Sthorpej __strong_alias(__libc_thr_self,pthread_self)
1177dc01dbfSthorpej __strong_alias(__libc_thr_create,pthread_create)
1187dc01dbfSthorpej __strong_alias(__libc_thr_exit,pthread_exit)
119c62a74e6Sthorpej __strong_alias(__libc_thr_errno,pthread__errno)
1209e5c8705Snathanw __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
121095b25e7Sdrochner __strong_alias(__libc_thr_equal,pthread_equal)
12215e9cec1Sad __strong_alias(__libc_thr_init,pthread__init)
123c62a74e6Sthorpej 
124c62a74e6Sthorpej /*
125c62a74e6Sthorpej  * Static library kludge.  Place a reference to a symbol any library
126c62a74e6Sthorpej  * file which does not already have a reference here.
127c62a74e6Sthorpej  */
128c62a74e6Sthorpej extern int pthread__cancel_stub_binder;
129c62a74e6Sthorpej 
130c62a74e6Sthorpej void *pthread__static_lib_binder[] = {
131c62a74e6Sthorpej 	&pthread__cancel_stub_binder,
132c62a74e6Sthorpej 	pthread_cond_init,
133c62a74e6Sthorpej 	pthread_mutex_init,
134c62a74e6Sthorpej 	pthread_rwlock_init,
135c62a74e6Sthorpej 	pthread_barrier_init,
136c62a74e6Sthorpej 	pthread_key_create,
137bd9a18b7Snathanw 	pthread_setspecific,
138c62a74e6Sthorpej };
139c62a74e6Sthorpej 
140*2bcb8bf1Sad #define	NHASHLOCK	64
141*2bcb8bf1Sad 
142*2bcb8bf1Sad static union hashlock {
143*2bcb8bf1Sad 	pthread_mutex_t	mutex;
144*2bcb8bf1Sad 	char		pad[64];
145*2bcb8bf1Sad } hashlocks[NHASHLOCK] __aligned(64);
146*2bcb8bf1Sad 
147c62a74e6Sthorpej /*
148c62a74e6Sthorpej  * This needs to be started by the library loading code, before main()
149c62a74e6Sthorpej  * gets to run, for various things that use the state of the initial thread
150c62a74e6Sthorpej  * to work properly (thread-specific data is an application-visible example;
151c62a74e6Sthorpej  * spinlock counts for mutexes is an internal example).
152c62a74e6Sthorpej  */
153c62a74e6Sthorpej void
15415e9cec1Sad pthread__init(void)
155c62a74e6Sthorpej {
156c62a74e6Sthorpej 	pthread_t first;
1570172694eSnathanw 	char *p;
158b8833ff5Sad 	int i, mib[2];
159f2f10664Scl 	size_t len;
160c62a74e6Sthorpej 	extern int __isthreaded;
161c62a74e6Sthorpej 
162f2f10664Scl 	mib[0] = CTL_HW;
163f2f10664Scl 	mib[1] = HW_NCPU;
164f2f10664Scl 
165b8833ff5Sad 	len = sizeof(pthread__concurrency);
166b8833ff5Sad 	if (sysctl(mib, 2, &pthread__concurrency, &len, NULL, 0) == -1)
167792cc0e1Sad 		err(1, "sysctl(hw.ncpu");
168f2f10664Scl 
169c3f8e2eeSad 	mib[0] = CTL_KERN;
170c3f8e2eeSad 	mib[1] = KERN_OSREV;
171c3f8e2eeSad 
172c3f8e2eeSad 	len = sizeof(pthread__osrev);
173c3f8e2eeSad 	if (sysctl(mib, 2, &pthread__osrev, &len, NULL, 0) == -1)
174c3f8e2eeSad 		err(1, "sysctl(hw.osrevision");
175c3f8e2eeSad 
176c62a74e6Sthorpej 	/* Initialize locks first; they're needed elsewhere. */
177b8833ff5Sad 	pthread__lockprim_init();
178*2bcb8bf1Sad 	for (i = 0; i < NHASHLOCK; i++) {
179*2bcb8bf1Sad 		pthread_mutex_init(&hashlocks[i].mutex, NULL);
180*2bcb8bf1Sad 	}
181f2f10664Scl 
182b8833ff5Sad 	/* Fetch parameters. */
1833247035dSad 	i = (int)_lwp_unpark_all(NULL, 0, NULL);
1843247035dSad 	if (i == -1)
1853247035dSad 		err(1, "_lwp_unpark_all");
186ded26025Sad 	if (i < pthread__unpark_max)
187ded26025Sad 		pthread__unpark_max = i;
188c62a74e6Sthorpej 
189c62a74e6Sthorpej 	/* Basic data structure setup */
190c62a74e6Sthorpej 	pthread_attr_init(&pthread_default_attr);
191f1b2c1c4Sad 	PTQ_INIT(&pthread__allqueue);
192c62a74e6Sthorpej 	PTQ_INIT(&pthread__deadqueue);
1939583eeb2Sad 	RB_INIT(&pthread__alltree);
1949e287199Sad 
195c62a74e6Sthorpej 	/* Create the thread structure corresponding to main() */
196c62a74e6Sthorpej 	pthread__initmain(&first);
19750fa8db4Sad 	pthread__initthread(first);
198b8833ff5Sad 	pthread__scrubthread(first, NULL, 0);
1991ac6a89bSad 
2001ac6a89bSad 	first->pt_lid = _lwp_self();
201f1b2c1c4Sad 	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
2029583eeb2Sad 	RB_INSERT(__pthread__alltree, &pthread__alltree, first);
203c62a74e6Sthorpej 
204eceac52fSad 	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) {
205eceac52fSad 		err(1, "_lwp_ctl");
206eceac52fSad 	}
20766ac2ffaSad 
208c62a74e6Sthorpej 	/* Start subsystems */
209c62a74e6Sthorpej 	PTHREAD_MD_INIT
210c62a74e6Sthorpej 
21115e9cec1Sad 	for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
2120172694eSnathanw 		switch (*p) {
2130172694eSnathanw 		case 'a':
2140172694eSnathanw 			pthread__diagassert |= DIAGASSERT_ABORT;
2150172694eSnathanw 			break;
2160172694eSnathanw 		case 'A':
2170172694eSnathanw 			pthread__diagassert &= ~DIAGASSERT_ABORT;
2180172694eSnathanw 			break;
2190172694eSnathanw 		case 'e':
2200172694eSnathanw 			pthread__diagassert |= DIAGASSERT_STDERR;
2210172694eSnathanw 			break;
2220172694eSnathanw 		case 'E':
2230172694eSnathanw 			pthread__diagassert &= ~DIAGASSERT_STDERR;
2240172694eSnathanw 			break;
2250172694eSnathanw 		case 'l':
2260172694eSnathanw 			pthread__diagassert |= DIAGASSERT_SYSLOG;
2270172694eSnathanw 			break;
2280172694eSnathanw 		case 'L':
2290172694eSnathanw 			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
2300172694eSnathanw 			break;
231df277271Snathanw 		}
2320172694eSnathanw 	}
2330172694eSnathanw 
234c62a74e6Sthorpej 	/* Tell libc that we're here and it should role-play accordingly. */
23566ac2ffaSad 	pthread__first = first;
23666ac2ffaSad 	pthread_atfork(NULL, NULL, pthread__fork_callback);
237c62a74e6Sthorpej 	__isthreaded = 1;
238c62a74e6Sthorpej }
239c62a74e6Sthorpej 
/*
 * atfork child handler: re-request a lwpctl block for the surviving
 * thread, since lwpctl state is not inherited across fork().
 */
static void
pthread__fork_callback(void)
{

	/* lwpctl state is not copied across fork. */
	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &pthread__first->pt_lwpctl)) {
		err(1, "_lwp_ctl");
	}
}
24966ac2ffaSad 
25066ac2ffaSad static void
2512a4cef11Snathanw pthread__child_callback(void)
2522a4cef11Snathanw {
25366ac2ffaSad 
2542a4cef11Snathanw 	/*
2552a4cef11Snathanw 	 * Clean up data structures that a forked child process might
2562a4cef11Snathanw 	 * trip over. Note that if threads have been created (causing
2572a4cef11Snathanw 	 * this handler to be registered) the standards say that the
2582a4cef11Snathanw 	 * child will trigger undefined behavior if it makes any
2592a4cef11Snathanw 	 * pthread_* calls (or any other calls that aren't
2602a4cef11Snathanw 	 * async-signal-safe), so we don't really have to clean up
2612a4cef11Snathanw 	 * much. Anything that permits some pthread_* calls to work is
2622a4cef11Snathanw 	 * merely being polite.
2632a4cef11Snathanw 	 */
2642a4cef11Snathanw 	pthread__started = 0;
2652a4cef11Snathanw }
266c62a74e6Sthorpej 
2670e675542Schs static void
268c62a74e6Sthorpej pthread__start(void)
269c62a74e6Sthorpej {
270c62a74e6Sthorpej 
271ff14fbf2Snathanw 	/*
272ff14fbf2Snathanw 	 * Per-process timers are cleared by fork(); despite the
273ff14fbf2Snathanw 	 * various restrictions on fork() and threads, it's legal to
274ff14fbf2Snathanw 	 * fork() before creating any threads.
275ff14fbf2Snathanw 	 */
2762a4cef11Snathanw 	pthread_atfork(NULL, NULL, pthread__child_callback);
277c62a74e6Sthorpej }
278c62a74e6Sthorpej 
279c62a74e6Sthorpej 
/* General-purpose thread data structure sanitization. */
/* ARGSUSED */
static void
pthread__initthread(pthread_t t)
{

	/* Identity and sanity-check magic. */
	t->pt_self = t;
	t->pt_magic = PT_MAGIC;
	/* Park/unpark bookkeeping starts clean. */
	t->pt_willpark = 0;
	t->pt_unpark = 0;
	t->pt_nwaiters = 0;
	t->pt_sleepobj = NULL;
	t->pt_signalled = 0;
	t->pt_havespecific = 0;
	t->pt_early = NULL;
	/* Point at the dummy lwpctl block until the kernel supplies one. */
	t->pt_lwpctl = &pthread__dummy_lwpctl;
	t->pt_blocking = 0;
	t->pt_droplock = NULL;

	memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
	pthread_mutex_init(&t->pt_lock, NULL);
	PTQ_INIT(&t->pt_cleanup_stack);
	pthread_cond_init(&t->pt_joiners, NULL);
	memset(&t->pt_specific, 0, sizeof(t->pt_specific));
}
305c62a74e6Sthorpej 
/*
 * Reset the per-incarnation fields of a thread structure; called each
 * time the structure is (re)used for a new thread of execution.
 */
static void
pthread__scrubthread(pthread_t t, char *name, int flags)
{

	t->pt_state = PT_STATE_RUNNING;
	t->pt_exitval = NULL;
	t->pt_flags = flags;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_name = name;	/* ownership of the name string transfers here */
	t->pt_lid = 0;		/* filled in once the LWP is created */
}
318b8833ff5Sad 
319c62a74e6Sthorpej 
/*
 * Create a new thread running startfunc(arg).  A dead thread structure
 * is recycled from the dead queue when possible; otherwise a fresh
 * stack/pthread_st is allocated.  On success the new handle is stored
 * through "thread".  Returns 0, or EINVAL/ENOMEM/an _lwp_create error.
 */
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * volatile name;
	unsigned long flag;
	int ret;

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	newthread = NULL;

	/*
	 * Try to reclaim a dead thread.
	 */
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		pthread_mutex_lock(&pthread__deadqueue_lock);
		newthread = PTQ_FIRST(&pthread__deadqueue);
		if (newthread != NULL) {
			PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq);
			pthread_mutex_unlock(&pthread__deadqueue_lock);
			/* Still running? */
			if (newthread->pt_lwpctl->lc_curcpu !=
			    LWPCTL_CPU_EXITED &&
			    (_lwp_kill(newthread->pt_lid, 0) == 0 ||
			    errno != ESRCH)) {
				/* LWP not fully exited yet; put it back. */
				pthread_mutex_lock(&pthread__deadqueue_lock);
				PTQ_INSERT_TAIL(&pthread__deadqueue,
				    newthread, pt_deadq);
				pthread_mutex_unlock(&pthread__deadqueue_lock);
				newthread = NULL;
			}
		} else
			pthread_mutex_unlock(&pthread__deadqueue_lock);
	}

	/*
	 * If necessary set up a stack, allocate space for a pthread_st,
	 * and initialize it.
	 */
	if (newthread == NULL) {
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}

		/* This is used only when creating the thread. */
		_INITCONTEXT_U(&newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
		pthread__uc_id(&newthread->pt_uc) = newthread;
#endif
		newthread->pt_uc.uc_stack = newthread->pt_stack;
		newthread->pt_uc.uc_link = NULL;

		/* Add to list of all threads. */
		pthread_rwlock_wrlock(&pthread__alltree_lock);
		PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq);
		RB_INSERT(__pthread__alltree, &pthread__alltree, newthread);
		pthread_rwlock_unlock(&pthread__alltree_lock);

		/* Will be reset by the thread upon exit. */
		pthread__initthread(newthread);
	}

	/*
	 * Create the new LWP.
	 */
	pthread__scrubthread(newthread, name, nattr.pta_flags);
	makecontext(&newthread->pt_uc, pthread__create_tramp, 3,
	    newthread, startfunc, arg);

	/* Always detached at the LWP level; pthread-level join is separate. */
	flag = LWP_DETACHED;
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0)
		flag |= LWP_SUSPENDED;
	ret = _lwp_create(&newthread->pt_uc, flag, &newthread->pt_lid);
	if (ret != 0) {
		/* Creation failed; park the structure for later reuse. */
		free(name);
		newthread->pt_state = PT_STATE_DEAD;
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, newthread, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		return ret;
	}

	*thread = newthread;

	return 0;
}
434c62a74e6Sthorpej 
435c62a74e6Sthorpej 
/*
 * Entry point for every new LWP: finish per-thread setup, run the
 * user's start function, and hand its return value to pthread_exit().
 * Never returns.
 */
static void
pthread__create_tramp(pthread_t self, void *(*start)(void *), void *arg)
{
	void *retval;

#ifdef PTHREAD__HAVE_THREADREG
	/* Set up identity register. */
	pthread__threadreg_set(self);
#endif

	/*
	 * Throw away some stack in a feeble attempt to reduce cache
	 * thrash.  May help for SMT processors.  XXX We should not
	 * be allocating stacks on fixed 2MB boundaries.  Needs a
	 * thread register or decent thread local storage.  Note
	 * that pt_lid may not be set by this point, but we don't
	 * care.
	 */
	(void)alloca(((unsigned)self->pt_lid & 7) << 8);

	if (self->pt_name != NULL) {
		/* Re-check under the lock: pthread_setname_np may race. */
		pthread_mutex_lock(&self->pt_lock);
		if (self->pt_name != NULL)
			(void)_lwp_setname(0, self->pt_name);
		pthread_mutex_unlock(&self->pt_lock);
	}

	/* Get a real lwpctl block from the kernel for this LWP. */
	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
		err(1, "_lwp_ctl");
	}

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}
474c62a74e6Sthorpej 
47538b1c6f4Schristos int
47638b1c6f4Schristos pthread_suspend_np(pthread_t thread)
47738b1c6f4Schristos {
478ba70e96aSchs 	pthread_t self;
479ba70e96aSchs 
480ba70e96aSchs 	self = pthread__self();
48138b1c6f4Schristos 	if (self == thread) {
48238b1c6f4Schristos 		return EDEADLK;
48338b1c6f4Schristos 	}
484d9adedd7Sad 	if (pthread__find(thread) != 0)
485ba70e96aSchs 		return ESRCH;
48640724da2Sad 	if (_lwp_suspend(thread->pt_lid) == 0)
48740724da2Sad 		return 0;
48840724da2Sad 	return errno;
48938b1c6f4Schristos }
49038b1c6f4Schristos 
49138b1c6f4Schristos int
49238b1c6f4Schristos pthread_resume_np(pthread_t thread)
49338b1c6f4Schristos {
49438b1c6f4Schristos 
495d9adedd7Sad 	if (pthread__find(thread) != 0)
496ba70e96aSchs 		return ESRCH;
49740724da2Sad 	if (_lwp_continue(thread->pt_lid) == 0)
49840724da2Sad 		return 0;
49940724da2Sad 	return errno;
50038b1c6f4Schristos }
50138b1c6f4Schristos 
/*
 * Terminate the calling thread: run cancellation cleanup handlers and
 * thread-specific-data destructors, publish the exit value, then either
 * self-recycle (detached) or linger as a zombie for pthread_join().
 * Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;

	self = pthread__self();

	/* Disable cancellability. */
	pthread_mutex_lock(&self->pt_lock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/* Call any cancellation cleanup handlers */
	if (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		/* Drop our lock: handlers run arbitrary user code. */
		pthread_mutex_unlock(&self->pt_lock);
		while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
			cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
			PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
			(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
		}
		pthread_mutex_lock(&self->pt_lock);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	/* Signal our exit. */
	self->pt_exitval = retval;
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/* No joiner will reap us; recycle the structure ourselves. */
		self->pt_state = PT_STATE_DEAD;
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_mutex_unlock(&self->pt_lock);
		if (name != NULL)
			free(name);
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		_lwp_exit();
	} else {
		/* Become a zombie and wake any threads in pthread_join(). */
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_cond_broadcast(&self->pt_joiners);
		pthread_mutex_unlock(&self->pt_lock);
		/* Note: name will be freed by the joiner. */
		_lwp_exit();
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
555c62a74e6Sthorpej 
556c62a74e6Sthorpej 
/*
 * Wait for "thread" to terminate and optionally fetch its exit value.
 * Returns 0 on success; ESRCH if the thread cannot be found or was
 * already reaped; EINVAL for a bad handle or a detached target;
 * EDEADLK for a self-join.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	int error;

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	/* Record the held lock so cancellation paths can release it. */
	self->pt_droplock = &thread->pt_lock;
	pthread_mutex_lock(&thread->pt_lock);
	for (;;) {
		if (thread->pt_state == PT_STATE_ZOMBIE)
			break;
		if (thread->pt_state == PT_STATE_DEAD) {
			/* Lost a race with another joiner or with detach. */
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return ESRCH;
		}
		if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return EINVAL;
		}
		error = pthread_cond_wait(&thread->pt_joiners,
		    &thread->pt_lock);
		if (error != 0) {
			pthread__errorfunc(__FILE__, __LINE__,
			    __func__, "unexpected return from cond_wait()");
		}

	}
	if (valptr != NULL)
		*valptr = thread->pt_exitval;
	/* pthread__reap() will drop the lock. */
	pthread__reap(thread);
	self->pt_droplock = NULL;

	return 0;
}
605c62a74e6Sthorpej 
606989565f8Sad static void
607989565f8Sad pthread__reap(pthread_t thread)
608989565f8Sad {
609989565f8Sad 	char *name;
610989565f8Sad 
611989565f8Sad 	name = thread->pt_name;
612989565f8Sad 	thread->pt_name = NULL;
613989565f8Sad 	thread->pt_state = PT_STATE_DEAD;
614989565f8Sad 	pthread_mutex_unlock(&thread->pt_lock);
615989565f8Sad 
616989565f8Sad 	pthread_mutex_lock(&pthread__deadqueue_lock);
617989565f8Sad 	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
618989565f8Sad 	pthread_mutex_unlock(&pthread__deadqueue_lock);
619989565f8Sad 
620989565f8Sad 	if (name != NULL)
621989565f8Sad 		free(name);
622989565f8Sad }
623c62a74e6Sthorpej 
/*
 * Compare two thread handles for identity.  Handles are unique per
 * thread, so a plain identity comparison is sufficient.
 */
int
pthread_equal(pthread_t t1, pthread_t t2)
{

	return t1 == t2 ? 1 : 0;
}
631c62a74e6Sthorpej 
632c62a74e6Sthorpej 
/*
 * Mark "thread" detached so its resources are reclaimed at exit rather
 * than held for a joiner.  If it is already a zombie, reap it now.
 */
int
pthread_detach(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_DETACHED;
	if (thread->pt_state == PT_STATE_ZOMBIE) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(thread);
	} else {
		/*
		 * Not valid for threads to be waiting in
		 * pthread_join() (there are intractable
		 * sync issues from the application
		 * perspective), but give those threads
		 * a chance anyway.
		 */
		pthread_cond_broadcast(&thread->pt_joiners);
		pthread_mutex_unlock(&thread->pt_lock);
	}

	return 0;
}
662c62a74e6Sthorpej 
663c62a74e6Sthorpej 
664c62a74e6Sthorpej int
665b33971b9Sthorpej pthread_getname_np(pthread_t thread, char *name, size_t len)
666c62a74e6Sthorpej {
667c62a74e6Sthorpej 
668d9adedd7Sad 	if (pthread__find(thread) != 0)
669b33971b9Sthorpej 		return ESRCH;
670b33971b9Sthorpej 
671b33971b9Sthorpej 	if (thread->pt_magic != PT_MAGIC)
672b33971b9Sthorpej 		return EINVAL;
673b33971b9Sthorpej 
674f4fd6b79Sad 	pthread_mutex_lock(&thread->pt_lock);
675b33971b9Sthorpej 	if (thread->pt_name == NULL)
676b33971b9Sthorpej 		name[0] = '\0';
677b33971b9Sthorpej 	else
678b33971b9Sthorpej 		strlcpy(name, thread->pt_name, len);
679f4fd6b79Sad 	pthread_mutex_unlock(&thread->pt_lock);
680c62a74e6Sthorpej 
681c62a74e6Sthorpej 	return 0;
682c62a74e6Sthorpej }
683c62a74e6Sthorpej 
684c62a74e6Sthorpej 
685c62a74e6Sthorpej int
686b33971b9Sthorpej pthread_setname_np(pthread_t thread, const char *name, void *arg)
687b33971b9Sthorpej {
688b33971b9Sthorpej 	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
689b33971b9Sthorpej 	int namelen;
690b33971b9Sthorpej 
691d9adedd7Sad 	if (pthread__find(thread) != 0)
692b33971b9Sthorpej 		return ESRCH;
693b33971b9Sthorpej 
694b33971b9Sthorpej 	if (thread->pt_magic != PT_MAGIC)
695b33971b9Sthorpej 		return EINVAL;
696b33971b9Sthorpej 
697b33971b9Sthorpej 	namelen = snprintf(newname, sizeof(newname), name, arg);
698b33971b9Sthorpej 	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
699b33971b9Sthorpej 		return EINVAL;
700b33971b9Sthorpej 
701b33971b9Sthorpej 	cp = strdup(newname);
702b33971b9Sthorpej 	if (cp == NULL)
703b33971b9Sthorpej 		return ENOMEM;
704b33971b9Sthorpej 
705f4fd6b79Sad 	pthread_mutex_lock(&thread->pt_lock);
706b33971b9Sthorpej 	oldname = thread->pt_name;
707b33971b9Sthorpej 	thread->pt_name = cp;
708f63239c2Sad 	(void)_lwp_setname(thread->pt_lid, cp);
709f4fd6b79Sad 	pthread_mutex_unlock(&thread->pt_lock);
710b33971b9Sthorpej 
711b33971b9Sthorpej 	if (oldname != NULL)
712b33971b9Sthorpej 		free(oldname);
713b33971b9Sthorpej 
714b33971b9Sthorpej 	return 0;
715b33971b9Sthorpej }
716b33971b9Sthorpej 
717b33971b9Sthorpej 
718b33971b9Sthorpej 
/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	/* Public wrapper around the fast internal pthread__self(). */
	return pthread__self();
}
729c62a74e6Sthorpej 
730c62a74e6Sthorpej 
/*
 * Request cancellation of "thread".  If cancellation is enabled for the
 * target, arm its cancel flag and wake it from any interruptible wait;
 * otherwise just record the request as pending.
 */
int
pthread_cancel(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_mutex_unlock(&thread->pt_lock);
		/* Kick the target in case it is blocked in the kernel. */
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}
748c62a74e6Sthorpej 
749c62a74e6Sthorpej 
/*
 * Enable or disable cancellation for the calling thread, optionally
 * returning the previous state via "oldstate".  Re-enabling with a
 * cancel pending re-arms the cancel flag, and in async-cancel mode
 * acts on it immediately.  Returns 0 or EINVAL for an unknown state.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			/* Demote an armed cancel back to merely pending. */
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_mutex_unlock(&self->pt_lock);
				pthread__cancelled();
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
796c62a74e6Sthorpej 
797c62a74e6Sthorpej 
798c62a74e6Sthorpej int
799c62a74e6Sthorpej pthread_setcanceltype(int type, int *oldtype)
800c62a74e6Sthorpej {
801c62a74e6Sthorpej 	pthread_t self;
8020878df5dSnathanw 	int retval;
803c62a74e6Sthorpej 
804c62a74e6Sthorpej 	self = pthread__self();
8050878df5dSnathanw 	retval = 0;
8060878df5dSnathanw 
807f4fd6b79Sad 	pthread_mutex_lock(&self->pt_lock);
808c62a74e6Sthorpej 
809c62a74e6Sthorpej 	if (oldtype != NULL) {
8100878df5dSnathanw 		if (self->pt_flags & PT_FLAG_CS_ASYNC)
811c62a74e6Sthorpej 			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
812c62a74e6Sthorpej 		else
813c62a74e6Sthorpej 			*oldtype = PTHREAD_CANCEL_DEFERRED;
814c62a74e6Sthorpej 	}
815c62a74e6Sthorpej 
816c62a74e6Sthorpej 	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
8170878df5dSnathanw 		self->pt_flags |= PT_FLAG_CS_ASYNC;
8180878df5dSnathanw 		if (self->pt_cancel) {
819f4fd6b79Sad 			pthread_mutex_unlock(&self->pt_lock);
820989565f8Sad 			pthread__cancelled();
8210878df5dSnathanw 		}
822c62a74e6Sthorpej 	} else if (type == PTHREAD_CANCEL_DEFERRED)
8230878df5dSnathanw 		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
824c62a74e6Sthorpej 	else
8250878df5dSnathanw 		retval = EINVAL;
826c62a74e6Sthorpej 
827f4fd6b79Sad 	pthread_mutex_unlock(&self->pt_lock);
82850fa8db4Sad 
8290878df5dSnathanw 	return retval;
830c62a74e6Sthorpej }
831c62a74e6Sthorpej 
832c62a74e6Sthorpej 
/*
 * pthread_testcancel: deferred cancellation point for application
 * code; exits the thread if a cancel has been posted.
 */
void
pthread_testcancel(void)
{

	pthread__testcancel(pthread__self());
}
842c62a74e6Sthorpej 
843c62a74e6Sthorpej 
844c62a74e6Sthorpej /*
845c62a74e6Sthorpej  * POSIX requires that certain functions return an error rather than
846c62a74e6Sthorpej  * invoking undefined behavior even when handed completely bogus
847c62a74e6Sthorpej  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
848c62a74e6Sthorpej  * utility routine searches the list of threads for the pthread_t
849c62a74e6Sthorpej  * value without dereferencing it.
850c62a74e6Sthorpej  */
851c62a74e6Sthorpej int
852d9adedd7Sad pthread__find(pthread_t id)
853c62a74e6Sthorpej {
854c62a74e6Sthorpej 	pthread_t target;
855c62a74e6Sthorpej 
8569583eeb2Sad 	pthread_rwlock_rdlock(&pthread__alltree_lock);
8579583eeb2Sad 	/* LINTED */
8589583eeb2Sad 	target = RB_FIND(__pthread__alltree, &pthread__alltree, id);
8599583eeb2Sad 	pthread_rwlock_unlock(&pthread__alltree_lock);
860c62a74e6Sthorpej 
861b8833ff5Sad 	if (target == NULL || target->pt_state == PT_STATE_DEAD)
862c62a74e6Sthorpej 		return ESRCH;
863c62a74e6Sthorpej 
864c62a74e6Sthorpej 	return 0;
865c62a74e6Sthorpej }
866c62a74e6Sthorpej 
867c62a74e6Sthorpej 
868c62a74e6Sthorpej void
869c62a74e6Sthorpej pthread__testcancel(pthread_t self)
870c62a74e6Sthorpej {
871c62a74e6Sthorpej 
872c62a74e6Sthorpej 	if (self->pt_cancel)
873989565f8Sad 		pthread__cancelled();
874989565f8Sad }
875989565f8Sad 
876989565f8Sad 
877989565f8Sad void
878989565f8Sad pthread__cancelled(void)
879989565f8Sad {
880989565f8Sad 	pthread_mutex_t *droplock;
881989565f8Sad 	pthread_t self;
882989565f8Sad 
883989565f8Sad 	self = pthread__self();
884989565f8Sad 	droplock = self->pt_droplock;
885989565f8Sad 	self->pt_droplock = NULL;
886989565f8Sad 
887989565f8Sad 	if (droplock != NULL && pthread_mutex_held_np(droplock))
888989565f8Sad 		pthread_mutex_unlock(droplock);
889989565f8Sad 
890c62a74e6Sthorpej 	pthread_exit(PTHREAD_CANCELED);
891c62a74e6Sthorpej }
892c62a74e6Sthorpej 
893c62a74e6Sthorpej 
894c62a74e6Sthorpej void
895c62a74e6Sthorpej pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
896c62a74e6Sthorpej {
897c62a74e6Sthorpej 	pthread_t self;
898c62a74e6Sthorpej 	struct pt_clean_t *entry;
899c62a74e6Sthorpej 
900c62a74e6Sthorpej 	self = pthread__self();
901c62a74e6Sthorpej 	entry = store;
902c62a74e6Sthorpej 	entry->ptc_cleanup = cleanup;
903c62a74e6Sthorpej 	entry->ptc_arg = arg;
904c62a74e6Sthorpej 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
905c62a74e6Sthorpej }
906c62a74e6Sthorpej 
907c62a74e6Sthorpej 
908c62a74e6Sthorpej void
909c62a74e6Sthorpej pthread__cleanup_pop(int ex, void *store)
910c62a74e6Sthorpej {
911c62a74e6Sthorpej 	pthread_t self;
912c62a74e6Sthorpej 	struct pt_clean_t *entry;
913c62a74e6Sthorpej 
914c62a74e6Sthorpej 	self = pthread__self();
915c62a74e6Sthorpej 	entry = store;
916c62a74e6Sthorpej 
917c62a74e6Sthorpej 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
918c62a74e6Sthorpej 	if (ex)
919c62a74e6Sthorpej 		(*entry->ptc_cleanup)(entry->ptc_arg);
920c62a74e6Sthorpej }
921c62a74e6Sthorpej 
922c62a74e6Sthorpej 
923783e2f6dSad int *
924783e2f6dSad pthread__errno(void)
925783e2f6dSad {
926783e2f6dSad 	pthread_t self;
927783e2f6dSad 
928783e2f6dSad 	self = pthread__self();
929783e2f6dSad 
930783e2f6dSad 	return &(self->pt_errno);
931783e2f6dSad }
932783e2f6dSad 
9330c967901Snathanw ssize_t	_sys_write(int, const void *, size_t);
9340c967901Snathanw 
/*
 * Assertion failure handler for pthread__assert(): format a message,
 * write it to stderr with the raw syscall stub, raise SIGABRT, and
 * exit.  Never returns.
 */
void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/*
	 * snprintf returns the would-be length when the output was
	 * truncated, and a negative value on error.  Clamp so the
	 * write never runs past the end of buf.
	 */
	if (len < 0)
		len = 0;
	else if ((size_t)len >= sizeof(buf))
		len = (int)(sizeof(buf) - 1);

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}
958df277271Snathanw 
959df277271Snathanw 
960df277271Snathanw void
9610e6c93b9Sdrochner pthread__errorfunc(const char *file, int line, const char *function,
9620e6c93b9Sdrochner 		   const char *msg)
963df277271Snathanw {
964df277271Snathanw 	char buf[1024];
9650172694eSnathanw 	size_t len;
966df277271Snathanw 
9670172694eSnathanw 	if (pthread__diagassert == 0)
968df277271Snathanw 		return;
969df277271Snathanw 
970df277271Snathanw 	/*
971df277271Snathanw 	 * snprintf should not acquire any locks, or we could
972df277271Snathanw 	 * end up deadlocked if the assert caller held locks.
973df277271Snathanw 	 */
974df277271Snathanw 	len = snprintf(buf, 1024,
9750172694eSnathanw 	    "%s: Error detected by libpthread: %s.\n"
9760172694eSnathanw 	    "Detected by file \"%s\", line %d%s%s%s.\n"
9770172694eSnathanw 	    "See pthread(3) for information.\n",
9780172694eSnathanw 	    getprogname(), msg, file, line,
979df277271Snathanw 	    function ? ", function \"" : "",
980df277271Snathanw 	    function ? function : "",
9810172694eSnathanw 	    function ? "\"" : "");
982df277271Snathanw 
9830172694eSnathanw 	if (pthread__diagassert & DIAGASSERT_STDERR)
9840c967901Snathanw 		_sys_write(STDERR_FILENO, buf, len);
9850172694eSnathanw 
9860172694eSnathanw 	if (pthread__diagassert & DIAGASSERT_SYSLOG)
9870172694eSnathanw 		syslog(LOG_DEBUG | LOG_USER, "%s", buf);
9880172694eSnathanw 
9890172694eSnathanw 	if (pthread__diagassert & DIAGASSERT_ABORT) {
990df277271Snathanw 		(void)kill(getpid(), SIGABRT);
991df277271Snathanw 		_exit(1);
992df277271Snathanw 	}
993df277271Snathanw }
9941ac6a89bSad 
995fe9718acSad /*
996ded26025Sad  * Thread park/unpark operations.  The kernel operations are
997ded26025Sad  * modelled after a brief description from "Multithreading in
998ded26025Sad  * the Solaris Operating Environment":
999fe9718acSad  *
1000fe9718acSad  * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
1001fe9718acSad  */
1002fe9718acSad 
10031ac6a89bSad #define	OOPS(msg)			\
1004858097f9Schristos     pthread__errorfunc(__FILE__, __LINE__, __func__, msg)
10051ac6a89bSad 
/*
 * pthread__park: put the calling thread ("self") to sleep on "queue"
 * until it is unparked, interrupted, or (if "abstime" is non-NULL)
 * the absolute timeout expires.  "lock" is the interlock protecting
 * the queue and must be held on entry; it is released before
 * sleeping.  When "cancelpt" is nonzero this is a cancellation point
 * and a posted cancel makes the call return EINTR.  Returns 0,
 * ETIMEDOUT, EINTR, or another errno reported by _lwp_park().
 */
int
pthread__park(pthread_t self, pthread_mutex_t *lock,
	      pthread_queue_t *queue, const struct timespec *abstime,
	      int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/*
	 * For non-interlocked release of mutexes we need a store
	 * barrier before incrementing pt_blocking away from zero. 
	 * This is provided by pthread_mutex_unlock().
	 */
	self->pt_willpark = 1;
	pthread_mutex_unlock(lock);
	self->pt_willpark = 0;
	self->pt_blocking++;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of pt_sleepobj without
	 * holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	do {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(abstime, self->pt_unpark, hint, hint);
		self->pt_unpark = 0;
		if (error != 0) {
			/* EINTR/EALREADY mean "awoken early": not errors. */
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleepobj != NULL)) {
		pthread_mutex_lock(lock);
		/* Re-check under the interlock; an unparker may have won. */
		if ((obj = self->pt_sleepobj) != NULL) {
			PTQ_REMOVE(queue, self, pt_sleep);
			self->pt_sleepobj = NULL;
			/* Run the early-wakeup callback, if registered. */
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread_mutex_unlock(lock);
	}
	self->pt_early = NULL;
	self->pt_blocking--;
	membar_sync();

	return rv;
}
11001ac6a89bSad 
/*
 * pthread__unpark: remove the first waiter from "queue" and schedule
 * it to be woken.  The LWP ID is batched into self->pt_waiters and
 * the actual wakeup deferred via pthread__mutex_deferwake(); a full
 * batch is flushed immediately with _lwp_unpark_all().
 *
 * NOTE(review): PTQ_FIRST()'s result is dereferenced without a NULL
 * check, so callers must guarantee the queue is non-empty, and are
 * presumed to hold "interlock" -- confirm against call sites.
 */
void
pthread__unpark(pthread_queue_t *queue, pthread_t self,
		pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max, nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	target = PTQ_FIRST(queue);
	if (nwaiters == max) {
		/* Overflow: batch is full, flush pending wakeups now. */
		(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
		    __UNVOLATILE(&interlock->ptm_waiters));
		nwaiters = 0;
	}
	/* Clearing pt_sleepobj tells the waiter it has been released. */
	target->pt_sleepobj = NULL;
	self->pt_waiters[nwaiters++] = target->pt_lid;
	PTQ_REMOVE(queue, target, pt_sleep);
	self->pt_nwaiters = nwaiters;
	pthread__mutex_deferwake(self, interlock);
}
11231ac6a89bSad 
/*
 * pthread__unpark_all: release every waiter on "queue", batching LWP
 * IDs into self->pt_waiters and flushing with _lwp_unpark_all()
 * whenever the batch fills.  The queue is reset to empty and the
 * remaining wakeups deferred via pthread__mutex_deferwake().
 * Caller is presumed to hold "interlock" (protects the queue).
 */
void
pthread__unpark_all(pthread_queue_t *queue, pthread_t self,
		    pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max, nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	PTQ_FOREACH(target, queue, pt_sleep) {
		if (nwaiters == max) {
			/* Overflow: batch is full, flush it now. */
			(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
			    __UNVOLATILE(&interlock->ptm_waiters));
			nwaiters = 0;
		}
		/* Mark each waiter released before it can run again. */
		target->pt_sleepobj = NULL;
		self->pt_waiters[nwaiters++] = target->pt_lid;
	}
	self->pt_nwaiters = nwaiters;
	/* All waiters are accounted for; empty the queue wholesale. */
	PTQ_INIT(queue);
	pthread__mutex_deferwake(self, interlock);
}
11471ac6a89bSad 
11481ac6a89bSad #undef	OOPS
11499e287199Sad 
11509e287199Sad /*
11519e287199Sad  * Allocate a stack for a thread, and set it up. It needs to be aligned, so
11529e287199Sad  * that a thread can find itself by its stack pointer.
11539e287199Sad  */
11549e287199Sad static int
11559e287199Sad pthread__stackalloc(pthread_t *newt)
11569e287199Sad {
11579e287199Sad 	void *addr;
11589e287199Sad 
11599e287199Sad 	addr = mmap(NULL, pthread__stacksize, PROT_READ|PROT_WRITE,
11609e287199Sad 	    MAP_ANON|MAP_PRIVATE | MAP_ALIGNED(pthread__stacksize_lg),
11619e287199Sad 	    -1, (off_t)0);
11629e287199Sad 
11639e287199Sad 	if (addr == MAP_FAILED)
11649e287199Sad 		return ENOMEM;
11659e287199Sad 
11669e287199Sad 	pthread__assert(((intptr_t)addr & pthread__stackmask) == 0);
11679e287199Sad 
11689e287199Sad 	return pthread__stackid_setup(addr, pthread__stacksize, newt);
11699e287199Sad }
11709e287199Sad 
11719e287199Sad 
11729e287199Sad /*
11739e287199Sad  * Set up the slightly special stack for the "initial" thread, which
11749e287199Sad  * runs on the normal system stack, and thus gets slightly different
11759e287199Sad  * treatment.
11769e287199Sad  */
static void
pthread__initmain(pthread_t *newt)
{
	struct rlimit slimit;
	size_t pagesize;
	pthread_t t;
	void *base;
	size_t size;
	int error, ret;
	char *value;

	pagesize = (size_t)sysconf(_SC_PAGESIZE);
	pthread__stacksize = 0;
	ret = getrlimit(RLIMIT_STACK, &slimit);
	if (ret == -1)
		err(1, "Couldn't get stack resource consumption limits");

	/*
	 * Honour PTHREAD_STACKSIZE (in kilobytes) from the environment,
	 * capped at the RLIMIT_STACK soft limit.
	 */
	value = pthread__getenv("PTHREAD_STACKSIZE");
	if (value != NULL) {
		pthread__stacksize = atoi(value) * 1024;
		if (pthread__stacksize > slimit.rlim_cur)
			pthread__stacksize = (size_t)slimit.rlim_cur;
	}
	if (pthread__stacksize == 0)
		pthread__stacksize = (size_t)slimit.rlim_cur;
	if (pthread__stacksize < 4 * pagesize)
		errx(1, "Stacksize limit is too low, minimum %zd kbyte.",
		    4 * pagesize / 1024);

	/*
	 * Round the stack size down to a power of two (compute its
	 * log2 by shifting) so stack-pointer masking works; derive
	 * the stack/thread masks from it.
	 */
	pthread__stacksize_lg = -1;
	while (pthread__stacksize) {
		pthread__stacksize >>= 1;
		pthread__stacksize_lg++;
	}

	pthread__stacksize = (1 << pthread__stacksize_lg);
	pthread__stackmask = pthread__stacksize - 1;
	pthread__threadmask = ~pthread__stackmask;

	/* The main thread's "stack base" is where the SP already is. */
	base = (void *)(pthread__sp() & pthread__threadmask);
	size = pthread__stacksize;

	error = pthread__stackid_setup(base, size, &t);
	if (error) {
		/* XXX */
		errx(2, "failed to setup main thread: error=%d", error);
	}

	*newt = t;

#ifdef PTHREAD__HAVE_THREADREG
	/* Set up identity register. */
	pthread__threadreg_set(t);
#endif
}
12329e287199Sad 
12339e287199Sad static int
12349e287199Sad /*ARGSUSED*/
12359e287199Sad pthread__stackid_setup(void *base, size_t size, pthread_t *tp)
12369e287199Sad {
12379e287199Sad 	pthread_t t;
12389e287199Sad 	void *redaddr;
12399e287199Sad 	size_t pagesize;
12409e287199Sad 	int ret;
12419e287199Sad 
12429e287199Sad 	t = base;
12439e287199Sad 	pagesize = (size_t)sysconf(_SC_PAGESIZE);
12449e287199Sad 
12459e287199Sad 	/*
12469e287199Sad 	 * Put a pointer to the pthread in the bottom (but
12479e287199Sad          * redzone-protected section) of the stack.
12489e287199Sad 	 */
12499e287199Sad 	redaddr = STACK_SHRINK(STACK_MAX(base, size), pagesize);
12509e287199Sad 	t->pt_stack.ss_size = size - 2 * pagesize;
12519e287199Sad #ifdef __MACHINE_STACK_GROWS_UP
12529e287199Sad 	t->pt_stack.ss_sp = (char *)(void *)base + pagesize;
12539e287199Sad #else
12549e287199Sad 	t->pt_stack.ss_sp = (char *)(void *)base + 2 * pagesize;
12559e287199Sad #endif
12569e287199Sad 
12579e287199Sad 	/* Protect the next-to-bottom stack page as a red zone. */
12589e287199Sad 	ret = mprotect(redaddr, pagesize, PROT_NONE);
12599e287199Sad 	if (ret == -1) {
12609e287199Sad 		return errno;
12619e287199Sad 	}
12629e287199Sad 	*tp = t;
12639e287199Sad 	return 0;
12649e287199Sad }
12659583eeb2Sad 
12669583eeb2Sad #ifndef lint
12679583eeb2Sad static int
12689583eeb2Sad pthread__cmp(struct __pthread_st *a, struct __pthread_st *b)
12699583eeb2Sad {
12709583eeb2Sad 	return b - a;
12719583eeb2Sad }
12729583eeb2Sad RB_GENERATE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
12739583eeb2Sad #endif
12749583eeb2Sad 
/* Because getenv() wants to use locks. */
char *
pthread__getenv(const char *name)
{
	extern char *__findenv(const char *, int *);
	int offset;

	/* __findenv does the lookup without taking any locks. */
	return __findenv(name, &offset);
}
128415e9cec1Sad 
1285*2bcb8bf1Sad pthread_mutex_t *
1286*2bcb8bf1Sad pthread__hashlock(volatile const void *p)
1287*2bcb8bf1Sad {
1288*2bcb8bf1Sad 	uintptr_t v;
128915e9cec1Sad 
1290*2bcb8bf1Sad 	v = (uintptr_t)p;
1291*2bcb8bf1Sad 	return &hashlocks[((v >> 9) ^ (v >> 3)) & (NHASHLOCK - 1)].mutex;
1292*2bcb8bf1Sad }
1293