/*	$NetBSD: pthread.c,v 1.99 2008/03/22 14:19:27 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.99 2008/03/22 14:19:27 ad Exp $");

#define	__EXPOSE_STACK	1

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/lwpctl.h>

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sched.h>

#include "pthread.h"
#include "pthread_int.h"

pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
RB_HEAD(__pthread__alltree, __pthread_st) pthread__alltree;

#ifndef lint
static int	pthread__cmp(struct __pthread_st *, struct __pthread_st *);
RB_PROTOTYPE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
#endif

static void	pthread__create_tramp(pthread_t, void *(*)(void *), void *);
static void	pthread__initthread(pthread_t);
static void	pthread__scrubthread(pthread_t, char *, int);
static int	pthread__stackid_setup(void *, size_t, pthread_t *);
static int	pthread__stackalloc(pthread_t *);
static void	pthread__initmain(pthread_t *);
static void	pthread__fork_callback(void);
static void	pthread__reap(pthread_t);
static void	pthread__child_callback(void);
static void	pthread__start(void);

void	pthread__init(void);

int pthread__started;
pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_queue_t pthread__deadqueue;
pthread_queue_t pthread__allqueue;

static pthread_attr_t pthread_default_attr;
static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE };
static pthread_t pthread__first;

enum {
	DIAGASSERT_ABORT =	1<<0,
	DIAGASSERT_STDERR =	1<<1,
	DIAGASSERT_SYSLOG =	1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

int pthread__concurrency;
int pthread__nspins;
int pthread__unpark_max = PTHREAD__UNPARK_MAX;
int pthread__osrev;

/*
 * We have to initialize the pthread_stack* variables here because
 * mutexes are used before pthread__init() and thus pthread__initmain()
 * are called.  Since mutexes only save the stack pointer and not a
 * pointer to the thread data, it is safe to change the mapping from
 * stack pointer to thread data afterwards.
 */
#define	_STACKSIZE_LG 18
int	pthread__stacksize_lg = _STACKSIZE_LG;
size_t	pthread__stacksize = 1 << _STACKSIZE_LG;
vaddr_t	pthread__stackmask = (1 << _STACKSIZE_LG) - 1;
vaddr_t pthread__threadmask = (vaddr_t)~((1 << _STACKSIZE_LG) - 1);
#undef	_STACKSIZE_LG
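
/*
 * Illustrative sketch (not part of the original source): with the
 * default _STACKSIZE_LG of 18, stacks are 256 kB regions aligned on
 * 256 kB boundaries, so a thread can recover its own pthread_t by
 * masking its stack pointer:
 *
 *	pthread_t self = (pthread_t)(pthread__sp() & pthread__threadmask);
 *
 * The thread structure lives at the base of the stack region; see
 * pthread__stackid_setup() below.
 */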

int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
__strong_alias(__libc_thr_equal,pthread_equal)
__strong_alias(__libc_thr_init,pthread__init)

/*
 * Static library kludge.  Place a reference to a symbol from any
 * library file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes are an internal example).
 */
void
pthread__init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2];
	size_t len;
	extern int __isthreaded;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(pthread__concurrency);
	if (sysctl(mib, 2, &pthread__concurrency, &len, NULL, 0) == -1)
		err(1, "sysctl(hw.ncpu)");

	mib[0] = CTL_KERN;
	mib[1] = KERN_OSREV;

	len = sizeof(pthread__osrev);
	if (sysctl(mib, 2, &pthread__osrev, &len, NULL, 0) == -1)
		err(1, "sysctl(kern.osrevision)");

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Fetch parameters. */
	i = (int)_lwp_unpark_all(NULL, 0, NULL);
	if (i == -1)
		err(1, "_lwp_unpark_all");
	if (i < pthread__unpark_max)
		pthread__unpark_max = i;

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	RB_INIT(&pthread__alltree);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first);
	pthread__scrubthread(first, NULL, 0);

	first->pt_lid = _lwp_self();
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
	RB_INSERT(__pthread__alltree, &pthread__alltree, first);

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) {
		err(1, "_lwp_ctl");
	}

	/* Start subsystems */
	PTHREAD_MD_INIT

	for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}
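
	/*
	 * For example (illustrative): running a program as
	 *
	 *	PTHREAD_DIAGASSERT=Ael ./prog
	 *
	 * suppresses the abort ('A') and reports libpthread errors
	 * both to stderr ('e') and to syslog ('l').
	 */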

	/* Tell libc that we're here and it should role-play accordingly. */
	pthread__first = first;
	pthread_atfork(NULL, NULL, pthread__fork_callback);
	__isthreaded = 1;
}

static void
pthread__fork_callback(void)
{

	/* lwpctl state is not copied across fork. */
	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &pthread__first->pt_lwpctl)) {
		err(1, "_lwp_ctl");
	}
}

static void
pthread__child_callback(void)
{

	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread_atfork(NULL, NULL, pthread__child_callback);
}


/* General-purpose thread data structure sanitization. */
/* ARGSUSED */
static void
pthread__initthread(pthread_t t)
{

	t->pt_self = t;
	t->pt_magic = PT_MAGIC;
	t->pt_willpark = 0;
	t->pt_unpark = 0;
	t->pt_sleeponq = 0;
	t->pt_nwaiters = 0;
	t->pt_sleepobj = NULL;
	t->pt_signalled = 0;
	t->pt_havespecific = 0;
	t->pt_early = NULL;
	t->pt_lwpctl = &pthread__dummy_lwpctl;
	t->pt_blocking = 0;
	t->pt_droplock = NULL;

	memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
	pthread_mutex_init(&t->pt_lock, NULL);
	PTQ_INIT(&t->pt_cleanup_stack);
	pthread_cond_init(&t->pt_joiners, NULL);
	memset(&t->pt_specific, 0, sizeof(t->pt_specific));
}

static void
pthread__scrubthread(pthread_t t, char *name, int flags)
{

	t->pt_state = PT_STATE_RUNNING;
	t->pt_exitval = NULL;
	t->pt_flags = flags;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_name = name;
	t->pt_lid = 0;
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * volatile name;
	unsigned long flag;
	int ret;

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	newthread = NULL;

	/*
	 * Try to reclaim a dead thread.
	 */
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		pthread_mutex_lock(&pthread__deadqueue_lock);
		newthread = PTQ_FIRST(&pthread__deadqueue);
		if (newthread != NULL) {
			PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq);
			pthread_mutex_unlock(&pthread__deadqueue_lock);
			/* Still running? */
			if (newthread->pt_lwpctl->lc_curcpu !=
			    LWPCTL_CPU_EXITED &&
			    (_lwp_kill(newthread->pt_lid, 0) == 0 ||
			    errno != ESRCH)) {
				pthread_mutex_lock(&pthread__deadqueue_lock);
				PTQ_INSERT_TAIL(&pthread__deadqueue,
				    newthread, pt_deadq);
				pthread_mutex_unlock(&pthread__deadqueue_lock);
				newthread = NULL;
			}
		} else
			pthread_mutex_unlock(&pthread__deadqueue_lock);
	}

	/*
	 * If necessary set up a stack, allocate space for a pthread_st,
	 * and initialize it.
	 */
	if (newthread == NULL) {
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}

		/* This is used only when creating the thread. */
		_INITCONTEXT_U(&newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
		pthread__uc_id(&newthread->pt_uc) = newthread;
#endif
		newthread->pt_uc.uc_stack = newthread->pt_stack;
		newthread->pt_uc.uc_link = NULL;

		/* Add to list of all threads. */
		pthread_rwlock_wrlock(&pthread__alltree_lock);
		PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq);
		RB_INSERT(__pthread__alltree, &pthread__alltree, newthread);
		pthread_rwlock_unlock(&pthread__alltree_lock);

		/* Will be reset by the thread upon exit. */
		pthread__initthread(newthread);
	}

	/*
	 * Create the new LWP.
	 */
	pthread__scrubthread(newthread, name, nattr.pta_flags);
	makecontext(&newthread->pt_uc, pthread__create_tramp, 3,
	    newthread, startfunc, arg);

	flag = LWP_DETACHED;
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0)
		flag |= LWP_SUSPENDED;
	ret = _lwp_create(&newthread->pt_uc, flag, &newthread->pt_lid);
	if (ret != 0) {
		free(name);
		newthread->pt_state = PT_STATE_DEAD;
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, newthread, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		return ret;
	}

	*thread = newthread;

	return 0;
}
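
/*
 * Example usage (illustrative sketch, not part of the library):
 * create a thread and collect its result.
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		return arg;
 *	}
 *
 *	pthread_t t;
 *	void *val;
 *
 *	if (pthread_create(&t, NULL, worker, (void *)1) == 0 &&
 *	    pthread_join(t, &val) == 0)
 *		assert(val == (void *)1);
 */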


static void
pthread__create_tramp(pthread_t self, void *(*start)(void *), void *arg)
{
	void *retval;

#ifdef PTHREAD__HAVE_THREADREG
	/* Set up identity register. */
	pthread__threadreg_set(self);
#endif

	/*
	 * Throw away some stack in a feeble attempt to reduce cache
	 * thrash.  May help for SMT processors.  XXX We should not
	 * be allocating stacks on fixed 2MB boundaries.  Needs a
	 * thread register or decent thread local storage.  Note
	 * that pt_lid may not be set by this point, but we don't
	 * care.
	 */
	(void)alloca(((unsigned)self->pt_lid & 7) << 8);

	if (self->pt_name != NULL) {
		pthread_mutex_lock(&self->pt_lock);
		if (self->pt_name != NULL)
			(void)_lwp_setname(0, self->pt_name);
		pthread_mutex_unlock(&self->pt_lock);
	}

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
		err(1, "_lwp_ctl");
	}

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
	if (pthread__find(thread) != 0)
		return ESRCH;
	if (_lwp_suspend(thread->pt_lid) == 0)
		return 0;
	return errno;
}

int
pthread_resume_np(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	if (_lwp_continue(thread->pt_lid) == 0)
		return 0;
	return errno;
}
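
/*
 * Example (illustrative sketch): temporarily stop another thread and
 * let it run again.  A thread cannot suspend itself this way; that
 * returns EDEADLK.
 *
 *	if (pthread_suspend_np(t) == 0) {
 *		... inspect t's state while it is stopped ...
 *		(void)pthread_resume_np(t);
 *	}
 */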

void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;

	self = pthread__self();

	/* Disable cancellability. */
	pthread_mutex_lock(&self->pt_lock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/* Call any cancellation cleanup handlers */
	if (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		pthread_mutex_unlock(&self->pt_lock);
		while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
			cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
			PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
			(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
		}
		pthread_mutex_lock(&self->pt_lock);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	/* Signal our exit. */
	self->pt_exitval = retval;
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_mutex_unlock(&self->pt_lock);
		if (name != NULL)
			free(name);
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		_lwp_exit();
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_cond_broadcast(&self->pt_joiners);
		pthread_mutex_unlock(&self->pt_lock);
		/* Note: name will be freed by the joiner. */
		_lwp_exit();
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	int error;

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	self->pt_droplock = &thread->pt_lock;
	pthread_mutex_lock(&thread->pt_lock);
	for (;;) {
		if (thread->pt_state == PT_STATE_ZOMBIE)
			break;
		if (thread->pt_state == PT_STATE_DEAD) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return ESRCH;
		}
		if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return EINVAL;
		}
		error = pthread_cond_wait(&thread->pt_joiners,
		    &thread->pt_lock);
		if (error != 0) {
			pthread__errorfunc(__FILE__, __LINE__,
			    __func__, "unexpected return from cond_wait()");
		}
	}
	if (valptr != NULL)
		*valptr = thread->pt_exitval;
	/* pthread__reap() will drop the lock. */
	pthread__reap(thread);
	self->pt_droplock = NULL;

	return 0;
}

static void
pthread__reap(pthread_t thread)
{
	char *name;

	name = thread->pt_name;
	thread->pt_name = NULL;
	thread->pt_state = PT_STATE_DEAD;
	pthread_mutex_unlock(&thread->pt_lock);

	pthread_mutex_lock(&pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
	pthread_mutex_unlock(&pthread__deadqueue_lock);

	if (name != NULL)
		free(name);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_DETACHED;
	if (thread->pt_state == PT_STATE_ZOMBIE) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(thread);
	} else {
		/*
		 * Not valid for threads to be waiting in
		 * pthread_join() (there are intractable
		 * sync issues from the application
		 * perspective), but give those threads
		 * a chance anyway.
		 */
		pthread_cond_broadcast(&thread->pt_joiners);
		pthread_mutex_unlock(&thread->pt_lock);
	}

	return 0;
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}


int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_mutex_lock(&thread->pt_lock);
	oldname = thread->pt_name;
	thread->pt_name = cp;
	(void)_lwp_setname(thread->pt_lid, cp);
	pthread_mutex_unlock(&thread->pt_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
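
/*
 * Example (illustrative sketch): the name argument is a printf-style
 * format with one optional argument, so per-thread names can be
 * generated in a single call:
 *
 *	pthread_setname_np(t, "worker/%d", (void *)(intptr_t)i);
 *
 * Names of PTHREAD_MAX_NAMELEN_NP or more characters are rejected
 * with EINVAL.
 */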


/*
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_mutex_unlock(&thread->pt_lock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}


int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_mutex_unlock(&self->pt_lock);
				pthread__cancelled();
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
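
/*
 * Example (illustrative sketch): shield a non-reentrant section from
 * cancellation, then restore the previous state.
 *
 *	int ocs;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
 *	... code that must not be cancelled part-way through ...
 *	pthread_setcancelstate(ocs, NULL);
 *
 * A cancel request that arrives while cancellation is disabled is
 * remembered (PT_FLAG_CS_PENDING) and acted on once cancellation is
 * re-enabled: immediately in asynchronous mode, otherwise at the
 * next cancellation point.
 */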


int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_mutex_unlock(&self->pt_lock);
			pthread__cancelled();
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread__cancelled();
}
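
/*
 * Example (illustrative sketch; do_work() is a placeholder): a
 * compute loop with no natural cancellation points can poll for a
 * deferred cancel request:
 *
 *	for (i = 0; i < n; i++) {
 *		do_work(i);
 *		pthread_testcancel();
 *	}
 */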


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t id)
{
	pthread_t target;

	pthread_rwlock_rdlock(&pthread__alltree_lock);
	/* LINTED */
	target = RB_FIND(__pthread__alltree, &pthread__alltree, id);
	pthread_rwlock_unlock(&pthread__alltree_lock);

	if (target == NULL || target->pt_state == PT_STATE_DEAD)
		return ESRCH;

	return 0;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread__cancelled();
}


void
pthread__cancelled(void)
{
	pthread_mutex_t *droplock;
	pthread_t self;

	self = pthread__self();
	droplock = self->pt_droplock;
	self->pt_droplock = NULL;

	if (droplock != NULL && pthread_mutex_held_np(droplock))
		pthread_mutex_unlock(droplock);

	pthread_exit(PTHREAD_CANCELED);
}


void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
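
/*
 * Applications reach the two functions above through the standard
 * pthread_cleanup_push()/pthread_cleanup_pop() macros, which must
 * appear as a lexically balanced pair.  Illustrative sketch
 * (unlock_mutex() is a placeholder for a void (*)(void *) handler):
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_mutex, &m);
 *	... code containing cancellation points ...
 *	pthread_cleanup_pop(1);		<- pops and runs the handler
 */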


int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);

void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}

/*
 * Thread park/unpark operations.  The kernel operations are
 * modelled after a brief description from "Multithreading in
 * the Solaris Operating Environment":
 *
 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
 */

#define	OOPS(msg)			\
    pthread__errorfunc(__FILE__, __LINE__, __func__, msg)

int
pthread__park(pthread_t self, pthread_spin_t *lock,
	      pthread_queue_t *queue, const struct timespec *abstime,
	      int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/* Clear the willpark flag, since we're about to block. */
	self->pt_willpark = 0;

	/*
	 * For non-interlocked release of mutexes we need a store
	 * barrier before incrementing pt_blocking away from zero.
	 * This is provided by the caller (it will release an
	 * interlock, or do an explicit barrier).
	 */
	self->pt_blocking++;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone to sleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of both pt_sleepobj and
	 * pt_sleeponq without holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	while ((self->pt_sleepobj != NULL || self->pt_unpark != 0) && rv == 0) {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(abstime, self->pt_unpark, hint,
		    self->pt_unparkhint);
		self->pt_unpark = 0;
		if (error != 0) {
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	}

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourselves.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleeponq)) {
		pthread__spinlock(self, lock);
		if (self->pt_sleeponq) {
			PTQ_REMOVE(queue, self, pt_sleep);
			obj = self->pt_sleepobj;
			self->pt_sleepobj = NULL;
			self->pt_sleeponq = 0;
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread__spinunlock(self, lock);
	}
	self->pt_early = NULL;
	self->pt_blocking--;

	return rv;
}
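
/*
 * Sketch of the intended calling pattern (illustrative; the exact
 * interlock protocol lives in the mutex/condvar code).  A waiter,
 * holding the object's interlock, queues itself:
 *
 *	self->pt_sleepobj = obj;
 *	self->pt_sleeponq = 1;
 *	PTQ_INSERT_TAIL(queue, self, pt_sleep);
 *
 * then drops the interlock (the release doubles as the store barrier
 * mentioned above) and calls pthread__park().  A waker takes the
 * interlock, removes the thread from the queue, and hands the lock
 * to pthread__unpark(), which drops it before issuing the
 * _lwp_unpark() system call.
 */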

void
pthread__unpark(pthread_t self, pthread_spin_t *lock,
		pthread_queue_t *queue, pthread_t target)
{
	int rv;

	if (target == NULL) {
		pthread__spinunlock(self, lock);
		return;
	}

	/*
	 * Easy: the thread has already been removed from
	 * the queue, so just awaken it.
	 */
	target->pt_sleepobj = NULL;
	target->pt_sleeponq = 0;

	/*
	 * Releasing the spinlock serves as a store barrier,
	 * which ensures that all our modifications are visible
	 * to the thread in pthread__park() before the unpark
	 * operation is set in motion.
	 */
	pthread__spinunlock(self, lock);

	/*
	 * If the calling thread is about to block, defer
	 * unparking the target until _lwp_park() is called.
	 */
	if (self->pt_willpark && self->pt_unpark == 0) {
		self->pt_unpark = target->pt_lid;
		self->pt_unparkhint = queue;
	} else {
		rv = _lwp_unpark(target->pt_lid, queue);
		if (rv != 0 && errno != EALREADY && errno != EINTR) {
			OOPS("_lwp_unpark failed");
		}
	}
}

void
pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
		    pthread_queue_t *queue)
{
	ssize_t n, rv;
	pthread_t thread, next;
	void *wakeobj;

	if (PTQ_EMPTY(queue) && self->pt_nwaiters == 0) {
		pthread__spinunlock(self, lock);
		return;
	}

	wakeobj = queue;

	for (;;) {
		/*
		 * Pull waiters from the queue and add to this
		 * thread's waiters list.
		 */
		thread = PTQ_FIRST(queue);
		for (n = self->pt_nwaiters, self->pt_nwaiters = 0;
		    n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			/*
			 * If the sleepobj pointer is non-NULL, it
			 * means one of two things:
			 *
			 * o The thread has awoken early, spun
			 *   through application code and is
			 *   once more asleep on this object.
			 *
			 * o This is a new thread that has blocked
			 *   on the object after we have released
			 *   the interlock in this loop.
			 *
			 * In both cases we shouldn't remove the
			 * thread from the queue.
			 */
			next = PTQ_NEXT(thread, pt_sleep);
			if (thread->pt_sleepobj != wakeobj)
				continue;
			thread->pt_sleepobj = NULL;
			thread->pt_sleeponq = 0;
			self->pt_waiters[n++] = thread->pt_lid;
			PTQ_REMOVE(queue, thread, pt_sleep);
		}

		/*
		 * Releasing the spinlock serves as a store barrier,
		 * which ensures that all our modifications are visible
		 * to the thread in pthread__park() before the unpark
		 * operation is set in motion.
		 */
		switch (n) {
		case 0:
			pthread__spinunlock(self, lock);
			return;
		case 1:
			/*
			 * If the calling thread is about to block,
			 * defer unparking the target until _lwp_park()
			 * is called.
			 */
			pthread__spinunlock(self, lock);
			if (self->pt_willpark && self->pt_unpark == 0) {
				self->pt_unpark = self->pt_waiters[0];
				self->pt_unparkhint = queue;
				return;
			}
			rv = (ssize_t)_lwp_unpark(self->pt_waiters[0], queue);
			if (rv != 0 && errno != EALREADY && errno != EINTR) {
				OOPS("_lwp_unpark failed");
			}
			return;
		default:
			/*
			 * Clear all sleepobj pointers, since we
			 * release the spin lock before awakening
			 * everybody, and must synchronise with
			 * pthread__park().
			 */
			while (thread != NULL) {
				thread->pt_sleepobj = NULL;
				thread = PTQ_NEXT(thread, pt_sleep);
			}
			/*
			 * Now only interested in waking threads
			 * marked to be woken (sleepobj == NULL).
			 */
			wakeobj = NULL;
			pthread__spinunlock(self, lock);
			rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
			    queue);
			if (rv != 0 && errno != EINTR) {
				OOPS("_lwp_unpark_all failed");
			}
			break;
		}
		pthread__spinlock(self, lock);
	}
}

#undef	OOPS

/*
 * Allocate a stack for a thread, and set it up. It needs to be aligned, so
 * that a thread can find itself by its stack pointer.
 */
static int
pthread__stackalloc(pthread_t *newt)
{
	void *addr;

	addr = mmap(NULL, pthread__stacksize, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE | MAP_ALIGNED(pthread__stacksize_lg),
	    -1, (off_t)0);

	if (addr == MAP_FAILED)
		return ENOMEM;

	pthread__assert(((intptr_t)addr & pthread__stackmask) == 0);

	return pthread__stackid_setup(addr, pthread__stacksize, newt);
}


/*
 * Set up the slightly special stack for the "initial" thread, which
 * runs on the normal system stack, and thus gets slightly different
 * treatment.
 */
static void
pthread__initmain(pthread_t *newt)
{
	struct rlimit slimit;
	size_t pagesize;
	pthread_t t;
	void *base;
	size_t size;
	int error, ret;
	char *value;

	pagesize = (size_t)sysconf(_SC_PAGESIZE);
	pthread__stacksize = 0;
	ret = getrlimit(RLIMIT_STACK, &slimit);
	if (ret == -1)
		err(1, "Couldn't get stack resource consumption limits");

	value = pthread__getenv("PTHREAD_STACKSIZE");
	if (value != NULL) {
		pthread__stacksize = atoi(value) * 1024;
		if (pthread__stacksize > slimit.rlim_cur)
			pthread__stacksize = (size_t)slimit.rlim_cur;
	}
	if (pthread__stacksize == 0)
		pthread__stacksize = (size_t)slimit.rlim_cur;
	if (pthread__stacksize < 4 * pagesize)
		errx(1, "Stacksize limit is too low, minimum %zu kbyte.",
		    4 * pagesize / 1024);

	pthread__stacksize_lg = -1;
	while (pthread__stacksize) {
		pthread__stacksize >>= 1;
		pthread__stacksize_lg++;
	}

	pthread__stacksize = (1 << pthread__stacksize_lg);
	pthread__stackmask = pthread__stacksize - 1;
	pthread__threadmask = ~pthread__stackmask;

	base = (void *)(pthread__sp() & pthread__threadmask);
	size = pthread__stacksize;

	error = pthread__stackid_setup(base, size, &t);
	if (error) {
		/* XXX */
		errx(2, "failed to setup main thread: error=%d", error);
	}

	*newt = t;

#ifdef PTHREAD__HAVE_THREADREG
	/* Set up identity register. */
	pthread__threadreg_set(t);
#endif
}
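
/*
 * Example (illustrative): PTHREAD_STACKSIZE is given in kbytes,
 * clamped to the RLIMIT_STACK soft limit, then rounded down to a
 * power of two.  For instance,
 *
 *	PTHREAD_STACKSIZE=768 ./prog
 *
 * requests 768 kB but yields 512 kB (2^19 byte) stacks.
 */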

static int
/*ARGSUSED*/
pthread__stackid_setup(void *base, size_t size, pthread_t *tp)
{
	pthread_t t;
	void *redaddr;
	size_t pagesize;
	int ret;

	t = base;
	pagesize = (size_t)sysconf(_SC_PAGESIZE);

	/*
	 * Put a pointer to the pthread in the bottom (but
	 * redzone-protected section) of the stack.
	 */
	redaddr = STACK_SHRINK(STACK_MAX(base, size), pagesize);
	t->pt_stack.ss_size = size - 2 * pagesize;
#ifdef __MACHINE_STACK_GROWS_UP
	t->pt_stack.ss_sp = (char *)(void *)base + pagesize;
#else
	t->pt_stack.ss_sp = (char *)(void *)base + 2 * pagesize;
#endif

	/* Protect the next-to-bottom stack page as a red zone. */
	ret = mprotect(redaddr, pagesize, PROT_NONE);
	if (ret == -1) {
		return errno;
	}
	*tp = t;
	return 0;
}

#ifndef lint
static int
pthread__cmp(struct __pthread_st *a, struct __pthread_st *b)
{
	return b - a;
}
RB_GENERATE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
#endif

/* Because getenv() wants to use locks. */
char *
pthread__getenv(const char *name)
{
	extern char *__findenv(const char *, int *);
	int off;

	return __findenv(name, &off);
}
1384