xref: /netbsd-src/lib/libpthread/pthread.c (revision 23c8222edbfb0f0932d88a8351d3a0cf817dfb9e)
1 /*	$NetBSD: pthread.c,v 1.36 2004/08/12 10:54:13 yamt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread.c,v 1.36 2004/08/12 10:54:13 yamt Exp $");
41 
42 #include <err.h>
43 #include <errno.h>
44 #include <lwp.h>
45 #include <signal.h>
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <string.h>
49 #include <syslog.h>
50 #include <ucontext.h>
51 #include <unistd.h>
52 #include <sys/param.h>
53 #include <sys/sysctl.h>
54 
55 #include <sched.h>
56 #include "pthread.h"
57 #include "pthread_int.h"
58 
59 #ifdef PTHREAD_MAIN_DEBUG
60 #define SDPRINTF(x) DPRINTF(x)
61 #else
62 #define SDPRINTF(x)
63 #endif
64 
65 static void	pthread__create_tramp(void *(*start)(void *), void *arg);
66 static void	pthread__dead(pthread_t, pthread_t);
67 
68 int pthread__started;
69 
70 pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED;
71 struct pthread_queue_t pthread__allqueue;
72 
73 pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED;
74 struct pthread_queue_t pthread__deadqueue;
75 struct pthread_queue_t *pthread__reidlequeue;
76 
77 static int nthreads;
78 static int nextthread;
79 static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED;
80 static pthread_attr_t pthread_default_attr;
81 
82 enum {
83 	DIAGASSERT_ABORT =	1<<0,
84 	DIAGASSERT_STDERR =	1<<1,
85 	DIAGASSERT_SYSLOG =	1<<2
86 };
87 
88 static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;
89 
90 pthread_spin_t pthread__runqueue_lock = __SIMPLELOCK_UNLOCKED;
91 struct pthread_queue_t pthread__runqueue;
92 struct pthread_queue_t pthread__idlequeue;
93 struct pthread_queue_t pthread__suspqueue;
94 
95 int pthread__concurrency, pthread__maxconcurrency;
96 
97 __strong_alias(__libc_thr_self,pthread_self)
98 __strong_alias(__libc_thr_create,pthread_create)
99 __strong_alias(__libc_thr_exit,pthread_exit)
100 __strong_alias(__libc_thr_errno,pthread__errno)
101 __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
102 
103 /*
104  * Static library kludge.  Place a reference to a symbol in any
105  * library file which does not already have a reference here.
106  */
107 extern int pthread__cancel_stub_binder;
108 extern int pthread__sched_binder;
109 extern struct pthread_queue_t pthread__nanosleeping;
110 
111 void *pthread__static_lib_binder[] = {
112 	&pthread__cancel_stub_binder,
113 	pthread_cond_init,
114 	pthread_mutex_init,
115 	pthread_rwlock_init,
116 	pthread_barrier_init,
117 	pthread_key_create,
118 	pthread_setspecific,
119 	&pthread__sched_binder,
120 	&pthread__nanosleeping
121 };
122 
123 /*
124  * This needs to be started by the library loading code, before main()
125  * gets to run, for various things that use the state of the initial thread
126  * to work properly (thread-specific data is an application-visible example;
127  * spinlock counts for mutexes are an internal example).
128  */
129 void
130 pthread_init(void)
131 {
132 	pthread_t first;
133 	char *p;
134 	int i, mib[2], ncpu;
135 	size_t len;
136 	extern int __isthreaded;
137 
138 	mib[0] = CTL_HW;
139 	mib[1] = HW_NCPU;
140 
141 	len = sizeof(ncpu);
142 	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1)
		ncpu = 1;	/* default conservatively if the kernel won't say */
143 
144 	/* Initialize locks first; they're needed elsewhere. */
145 	pthread__lockprim_init(ncpu);
146 
147 	/* Find out requested/possible concurrency */
148 	pthread__maxconcurrency = 1;
149 	p = getenv("PTHREAD_CONCURRENCY");
150 	if (p)
151 		pthread__maxconcurrency = atoi(p);
152 	if (pthread__maxconcurrency < 1)
153 		pthread__maxconcurrency = 1;
154 	if (pthread__maxconcurrency > ncpu)
155 		pthread__maxconcurrency = ncpu;
156 
157 	/* Allocate data structures */
158 	pthread__reidlequeue = (struct pthread_queue_t *)
159 	    malloc(pthread__maxconcurrency * sizeof(struct pthread_queue_t));
160 	if (pthread__reidlequeue == NULL)
161 		err(1, "Couldn't allocate memory for pthread__reidlequeue");
162 
163 	/* Basic data structure setup */
164 	pthread_attr_init(&pthread_default_attr);
165 	PTQ_INIT(&pthread__allqueue);
166 	PTQ_INIT(&pthread__deadqueue);
167 	PTQ_INIT(&pthread__runqueue);
168 	PTQ_INIT(&pthread__idlequeue);
169 	for (i = 0; i < pthread__maxconcurrency; i++)
170 		PTQ_INIT(&pthread__reidlequeue[i]);
171 	nthreads = 1;
172 
173 	/* Create the thread structure corresponding to main() */
174 	pthread__initmain(&first);
175 	pthread__initthread(first, first);
176 	first->pt_state = PT_STATE_RUNNING;
177 	sigprocmask(0, NULL, &first->pt_sigmask);
178 	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
179 
180 	/* Start subsystems */
181 	pthread__signal_init();
182 	PTHREAD_MD_INIT
183 #ifdef PTHREAD__DEBUG
184 	pthread__debug_init(ncpu);
185 #endif
186 
187 	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
188 		switch (*p) {
189 		case 'a':
190 			pthread__diagassert |= DIAGASSERT_ABORT;
191 			break;
192 		case 'A':
193 			pthread__diagassert &= ~DIAGASSERT_ABORT;
194 			break;
195 		case 'e':
196 			pthread__diagassert |= DIAGASSERT_STDERR;
197 			break;
198 		case 'E':
199 			pthread__diagassert &= ~DIAGASSERT_STDERR;
200 			break;
201 		case 'l':
202 			pthread__diagassert |= DIAGASSERT_SYSLOG;
203 			break;
204 		case 'L':
205 			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
206 			break;
207 		}
208 	}
209 
210 
211 	/* Tell libc that we're here and it should role-play accordingly. */
212 	__isthreaded = 1;
213 }
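
/*
 * Usage sketch (illustrative, not part of the library): the two
 * environment knobs consulted above can be set from the shell before a
 * threaded program starts; the program name "./a.out" is hypothetical.
 *
 *	$ PTHREAD_CONCURRENCY=2 PTHREAD_DIAGASSERT=Ael ./a.out
 *
 * This caps pthread__maxconcurrency at 2 (further clamped to hw.ncpu),
 * and sets the diagnostic flags to log to stderr ('e') and syslog ('l')
 * without aborting ('A' clears DIAGASSERT_ABORT).
 */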
214 
215 static void
216 pthread__child_callback(void)
217 {
218 	/*
219 	 * Clean up data structures that a forked child process might
220 	 * trip over. Note that if threads have been created (causing
221 	 * this handler to be registered) the standards say that the
222 	 * child will trigger undefined behavior if it makes any
223 	 * pthread_* calls (or any other calls that aren't
224 	 * async-signal-safe), so we don't really have to clean up
225 	 * much. Anything that permits some pthread_* calls to work is
226 	 * merely being polite.
227 	 */
228 	pthread__started = 0;
229 }
230 
231 static void
232 pthread__start(void)
233 {
234 	pthread_t self, idle;
235 	int i, ret;
236 
237 	self = pthread__self(); /* should be the "main()" thread */
238 
239 	/*
240 	 * Per-process timers are cleared by fork(); despite the
241 	 * various restrictions on fork() and threads, it's legal to
242 	 * fork() before creating any threads.
243 	 */
244 	pthread__alarm_init();
245 
246 	pthread_atfork(NULL, NULL, pthread__child_callback);
247 
248 	/*
249 	 * Create idle threads
250 	 * XXX need to create more idle threads if concurrency > 3
251 	 */
252 	for (i = 0; i < NIDLETHREADS; i++) {
253 		ret = pthread__stackalloc(&idle);
254 		if (ret != 0)
255 			err(1, "Couldn't allocate stack for idle thread!");
256 		pthread__initthread(self, idle);
257 		sigfillset(&idle->pt_sigmask);
258 		idle->pt_type = PT_THREAD_IDLE;
259 		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
260 		pthread__sched_idle(self, idle);
261 	}
262 
263 	/* Start up the SA subsystem */
264 	pthread__sa_start();
265 	SDPRINTF(("(pthread__start %p) Started.\n", self));
266 }
267 
268 
269 /* General-purpose thread data structure sanitization. */
270 void
271 pthread__initthread(pthread_t self, pthread_t t)
272 {
273 	int id;
274 
275 	pthread_spinlock(self, &nextthread_lock);
276 	id = nextthread;
277 	nextthread++;
278 	pthread_spinunlock(self, &nextthread_lock);
279 	t->pt_num = id;
280 
281 	t->pt_magic = PT_MAGIC;
282 	t->pt_type = PT_THREAD_NORMAL;
283 	t->pt_state = PT_STATE_RUNNABLE;
284 	pthread_lockinit(&t->pt_statelock);
285 	pthread_lockinit(&t->pt_flaglock);
286 	t->pt_spinlocks = 0;
287 	t->pt_next = NULL;
288 	t->pt_exitval = NULL;
289 	t->pt_flags = 0;
290 	t->pt_cancel = 0;
291 	t->pt_errno = 0;
292 	t->pt_parent = NULL;
293 	t->pt_heldlock = NULL;
294 	t->pt_switchto = NULL;
295 	t->pt_trapuc = NULL;
296 	sigemptyset(&t->pt_siglist);
297 	sigemptyset(&t->pt_sigmask);
298 	pthread_lockinit(&t->pt_siglock);
299 	PTQ_INIT(&t->pt_joiners);
300 	pthread_lockinit(&t->pt_join_lock);
301 	PTQ_INIT(&t->pt_cleanup_stack);
302 	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
303 	t->pt_name = NULL;
304 #ifdef PTHREAD__DEBUG
305 	t->blocks = 0;
306 	t->preempts = 0;
307 	t->rescheds = 0;
308 #endif
309 }
310 
311 
312 int
313 pthread_create(pthread_t *thread, const pthread_attr_t *attr,
314 	    void *(*startfunc)(void *), void *arg)
315 {
316 	pthread_t self, newthread;
317 	pthread_attr_t nattr;
318 	struct pthread_attr_private *p;
319 	char *name;
320 	int ret;
321 
322 	PTHREADD_ADD(PTHREADD_CREATE);
323 
324 	/*
325 	 * It's okay to check this without a lock because there can
326 	 * only be one thread before it becomes true.
327 	 */
328 	if (pthread__started == 0) {
329 		pthread__start();
330 		pthread__started = 1;
331 	}
332 
333 	if (attr == NULL)
334 		nattr = pthread_default_attr;
335 	else if (attr->pta_magic == PT_ATTR_MAGIC)
336 		nattr = *attr;
337 	else
338 		return EINVAL;
339 
340 	/* Fetch misc. attributes from the attr structure. */
341 	name = NULL;
342 	if ((p = nattr.pta_private) != NULL)
343 		if (p->ptap_name[0] != '\0')
344 			if ((name = strdup(p->ptap_name)) == NULL)
345 				return ENOMEM;
346 
347 	self = pthread__self();
348 
349 	pthread_spinlock(self, &pthread__deadqueue_lock);
350 	if (!PTQ_EMPTY(&pthread__deadqueue)) {
351 		newthread = PTQ_FIRST(&pthread__deadqueue);
352 		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
353 		pthread_spinunlock(self, &pthread__deadqueue_lock);
354 	} else {
355 		pthread_spinunlock(self, &pthread__deadqueue_lock);
356 		/* Set up a stack and allocate space for a pthread_st. */
357 		ret = pthread__stackalloc(&newthread);
358 		if (ret != 0) {
359 			if (name)
360 				free(name);
361 			return ret;
362 		}
363 	}
364 
365 	/* 2. Set up state. */
366 	pthread__initthread(self, newthread);
367 	newthread->pt_flags = nattr.pta_flags;
368 	newthread->pt_sigmask = self->pt_sigmask;
369 
370 	/* 3. Set up misc. attributes. */
371 	newthread->pt_name = name;
372 
373 	/*
374 	 * 4. Set up context.
375 	 *
376 	 * The pt_uc pointer points to a location safely below the
377 	 * stack start; this is arranged by pthread__stackalloc().
378 	 */
379 	_INITCONTEXT_U(newthread->pt_uc);
380 #ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
381 	pthread__uc_id(newthread->pt_uc) = newthread;
382 #endif
383 	newthread->pt_uc->uc_stack = newthread->pt_stack;
384 	newthread->pt_uc->uc_link = NULL;
385 	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
386 	    startfunc, arg);
387 
388 	/* 5. Add to list of all threads. */
389 	pthread_spinlock(self, &pthread__allqueue_lock);
390 	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
391 	nthreads++;
392 	pthread_spinunlock(self, &pthread__allqueue_lock);
393 
394 	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
395 	/* 6. Put on appropriate queue. */
396 	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
397 		pthread_spinlock(self, &newthread->pt_statelock);
398 		pthread__suspend(self, newthread);
399 		pthread_spinunlock(self, &newthread->pt_statelock);
400 	} else
401 		pthread__sched(self, newthread);
402 
403 	*thread = newthread;
404 
405 	return 0;
406 }
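
/*
 * Usage sketch (illustrative): a minimal create with an attribute-
 * supplied name, assuming the NetBSD pthread_attr_setname_np(3)
 * extension; worker() is a hypothetical start routine.
 *
 *	static void *worker(void *arg) { return arg; }
 *
 *	pthread_attr_t attr;
 *	pthread_t t;
 *	int error;
 *
 *	pthread_attr_init(&attr);
 *	pthread_attr_setname_np(&attr, "worker", NULL);
 *	error = pthread_create(&t, &attr, worker, NULL);
 *	if (error != 0)
 *		errx(1, "pthread_create: %s", strerror(error));
 *
 * Passing attr == NULL uses pthread_default_attr; a garbage attribute
 * pointer (pta_magic mismatch) gets EINVAL back instead.
 */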
407 
408 
409 static void
410 pthread__create_tramp(void *(*start)(void *), void *arg)
411 {
412 	void *retval;
413 
414 	retval = (*start)(arg);
415 
416 	pthread_exit(retval);
417 
418 	/*NOTREACHED*/
419 	pthread__abort();
420 }
421 
422 int
423 pthread_suspend_np(pthread_t thread)
424 {
425 	pthread_t self = pthread__self();
426 	if (self == thread) {
427 		fprintf(stderr, "suspend_np: can't suspend self\n");
428 		return EDEADLK;
429 	}
430 	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
431 		     self, thread, thread->pt_state));
432 	pthread_spinlock(self, &thread->pt_statelock);
433 	if (thread->pt_blockgen != thread->pt_unblockgen) {
434 		/* XXX flaglock? */
435 		thread->pt_flags |= PT_FLAG_SUSPENDED;
436 		pthread_spinunlock(self, &thread->pt_statelock);
437 		return 0;
438 	}
439 	switch (thread->pt_state) {
440 	case PT_STATE_RUNNING:
441 		pthread__abort();	/* XXX */
442 		break;
443 	case PT_STATE_SUSPENDED:
444 		pthread_spinunlock(self, &thread->pt_statelock);
445 		return 0;
446 	case PT_STATE_RUNNABLE:
447 		pthread_spinlock(self, &pthread__runqueue_lock);
448 		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
449 		pthread_spinunlock(self, &pthread__runqueue_lock);
450 		break;
451 	case PT_STATE_BLOCKED_QUEUE:
452 		pthread_spinlock(self, thread->pt_sleeplock);
453 		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
454 		pthread_spinunlock(self, thread->pt_sleeplock);
455 		break;
456 	default:
457 		break;			/* XXX */
458 	}
459 	pthread__suspend(self, thread);
460 	pthread_spinunlock(self, &thread->pt_statelock);
461 	return 0;
462 }
463 
464 int
465 pthread_resume_np(pthread_t thread)
466 {
467 
468 	pthread_t self = pthread__self();
469 	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
470 		     self, thread, thread->pt_state));
471 	pthread_spinlock(self, &thread->pt_statelock);
472 	/* XXX flaglock? */
473 	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
474 	if (thread->pt_state == PT_STATE_SUSPENDED) {
475 		pthread_spinlock(self, &pthread__runqueue_lock);
476 		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
477 		pthread_spinunlock(self, &pthread__runqueue_lock);
478 		pthread__sched(self, thread);
479 	}
480 	pthread_spinunlock(self, &thread->pt_statelock);
481 	return 0;
482 }
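
/*
 * Usage sketch (illustrative): a suspend/resume pair on another thread.
 * A thread cannot suspend itself here (EDEADLK above); t is assumed to
 * have been created earlier.
 *
 *	pthread_suspend_np(t);
 *	... inspect shared state while t is quiescent ...
 *	pthread_resume_np(t);
 *
 * A target blocked in the kernel only has PT_FLAG_SUSPENDED noted; it
 * is actually parked once the blockage ends.
 */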
483 
484 
485 /*
486  * Other threads will switch to the idle thread so that they
487  * can dispose of any awkward locks or recycle upcall state.
488  */
489 void
490 pthread__idle(void)
491 {
492 	pthread_t self;
493 
494 	PTHREADD_ADD(PTHREADD_IDLE);
495 	self = pthread__self();
496 	SDPRINTF(("(pthread__idle %p).\n", self));
497 
498 	/*
499 	 * The drill here is that we want to yield the processor,
500 	 * but for the thread itself to be recovered, we need to be on
501 	 * a list somewhere for the thread system to know about us.
502 	 */
503 	pthread_spinlock(self, &pthread__deadqueue_lock);
504 	PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq);
505 	pthread__concurrency--;
506 	SDPRINTF(("(yield %p concurrency) now %d\n", self,
507 		     pthread__concurrency));
508 	/* Don't need a flag lock; nothing else has a handle on this thread */
509 	self->pt_flags |= PT_FLAG_IDLED;
510 	pthread_spinunlock(self, &pthread__deadqueue_lock);
511 
512 	/*
513 	 * If we get to run this, then no preemption has happened
514 	 * (because the upcall handler will not continue an idle thread with
515 	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
516 	 */
517 	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
518 	sa_yield();
519 
520 	/* NOTREACHED */
521 	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
522 	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
523 	pthread__abort();
524 }
525 
526 
527 void
528 pthread_exit(void *retval)
529 {
530 	pthread_t self;
531 	struct pt_clean_t *cleanup;
532 	char *name;
533 	int nt;
534 
535 	self = pthread__self();
536 	SDPRINTF(("(pthread_exit %p) Exiting (status %p, flags %x, cancel %d).\n", self, retval, self->pt_flags, self->pt_cancel));
537 
538 	/* Disable cancellability. */
539 	pthread_spinlock(self, &self->pt_flaglock);
540 	self->pt_flags |= PT_FLAG_CS_DISABLED;
541 	self->pt_cancel = 0;
542 	pthread_spinunlock(self, &self->pt_flaglock);
543 
544 	/* Call any cancellation cleanup handlers */
545 	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
546 		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
547 		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
548 		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
549 	}
550 
551 	/* Perform cleanup of thread-specific data */
552 	pthread__destroy_tsd(self);
553 
554 	self->pt_exitval = retval;
555 
556 	/*
557 	 * it's safe to check PT_FLAG_DETACHED without pt_flaglock
558 	 * because it's only set by pthread_detach with pt_join_lock held.
559 	 */
560 	pthread_spinlock(self, &self->pt_join_lock);
561 	if (self->pt_flags & PT_FLAG_DETACHED) {
562 		self->pt_state = PT_STATE_DEAD;
563 		pthread_spinunlock(self, &self->pt_join_lock);
564 		name = self->pt_name;
565 		self->pt_name = NULL;
566 
567 		if (name != NULL)
568 			free(name);
569 
570 		pthread_spinlock(self, &pthread__allqueue_lock);
571 		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
572 		nthreads--;
573 		nt = nthreads;
574 		pthread_spinunlock(self, &pthread__allqueue_lock);
575 
576 		if (nt == 0) {
577 			/* Whoah, we're the last one. Time to go. */
578 			exit(0);
579 		}
580 
581 		/* Yeah, yeah, doing work while we're dead is tacky. */
582 		pthread_spinlock(self, &pthread__deadqueue_lock);
583 		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
584 		pthread__block(self, &pthread__deadqueue_lock);
585 	} else {
586 		self->pt_state = PT_STATE_ZOMBIE;
587 		/* Note: name will be freed by the joiner. */
588 		pthread_spinlock(self, &pthread__allqueue_lock);
589 		nthreads--;
590 		nt = nthreads;
591 		pthread_spinunlock(self, &pthread__allqueue_lock);
592 		if (nt == 0) {
593 			/* Whoah, we're the last one. Time to go. */
594 			exit(0);
595 		}
596 		/*
597 		 * Wake up all the potential joiners. Only one can win.
598 		 * (Can you say "Thundering Herd"? I knew you could.)
599 		 */
600 		pthread__sched_sleepers(self, &self->pt_joiners);
601 		pthread__block(self, &self->pt_join_lock);
602 	}
603 
604 	/*NOTREACHED*/
605 	pthread__abort();
606 	exit(1);
607 }
608 
609 
610 int
611 pthread_join(pthread_t thread, void **valptr)
612 {
613 	pthread_t self;
614 	char *name;
615 	int num;
616 
617 	self = pthread__self();
618 	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));
619 
620 	if (pthread__find(self, thread) != 0)
621 		return ESRCH;
622 
623 	if (thread->pt_magic != PT_MAGIC)
624 		return EINVAL;
625 
626 	if (thread == self)
627 		return EDEADLK;
628 
629 	pthread_spinlock(self, &thread->pt_flaglock);
630 
631 	if (thread->pt_flags & PT_FLAG_DETACHED) {
632 		pthread_spinunlock(self, &thread->pt_flaglock);
633 		return EINVAL;
634 	}
635 
636 	num = thread->pt_num;
637 	pthread_spinlock(self, &thread->pt_join_lock);
638 	while (thread->pt_state != PT_STATE_ZOMBIE) {
639 		if ((thread->pt_state == PT_STATE_DEAD) ||
640 		    (thread->pt_flags & PT_FLAG_DETACHED) ||
641 		    (thread->pt_num != num)) {
642 			/*
643 			 * Another thread beat us to the join, or called
644 			 * pthread_detach(). If num didn't match, the
645 			 * thread died and was recycled before we got
646 			 * another chance to run.
647 			 */
648 			pthread_spinunlock(self, &thread->pt_join_lock);
649 			pthread_spinunlock(self, &thread->pt_flaglock);
650 			return ESRCH;
651 		}
652 		/*
653 		 * "I'm not dead yet!"
654 		 * "You will be soon enough."
655 		 */
656 		pthread_spinunlock(self, &thread->pt_flaglock);
657 		pthread_spinlock(self, &self->pt_statelock);
658 		if (self->pt_cancel) {
659 			pthread_spinunlock(self, &self->pt_statelock);
660 			pthread_spinunlock(self, &thread->pt_join_lock);
661 			pthread_exit(PTHREAD_CANCELED);
662 		}
663 		self->pt_state = PT_STATE_BLOCKED_QUEUE;
664 		self->pt_sleepobj = thread;
665 		self->pt_sleepq = &thread->pt_joiners;
666 		self->pt_sleeplock = &thread->pt_join_lock;
667 		pthread_spinunlock(self, &self->pt_statelock);
668 
669 		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
670 		pthread__block(self, &thread->pt_join_lock);
671 		pthread_spinlock(self, &thread->pt_flaglock);
672 		pthread_spinlock(self, &thread->pt_join_lock);
673 	}
674 
675 	/* All ours. */
676 	thread->pt_state = PT_STATE_DEAD;
677 	name = thread->pt_name;
678 	thread->pt_name = NULL;
679 	pthread_spinunlock(self, &thread->pt_join_lock);
680 	pthread_spinunlock(self, &thread->pt_flaglock);
681 
682 	if (valptr != NULL)
683 		*valptr = thread->pt_exitval;
684 
685 	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));
686 
687 	pthread__dead(self, thread);
688 
689 	if (name != NULL)
690 		free(name);
691 
692 	return 0;
693 }
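
/*
 * Usage sketch (illustrative): reaping a thread and collecting its exit
 * value; t and worker() are assumed from a prior pthread_create().
 *
 *	void *result;
 *	int error = pthread_join(t, &result);
 *	if (error == 0)
 *		... result is worker()'s return value or PTHREAD_CANCELED ...
 *
 * Losing the race to another joiner, or joining a handle that was
 * already recycled (pt_num mismatch), reports ESRCH.
 */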
694 
695 
696 int
697 pthread_equal(pthread_t t1, pthread_t t2)
698 {
699 
700 	/* Nothing special here. */
701 	return (t1 == t2);
702 }
703 
704 
705 int
706 pthread_detach(pthread_t thread)
707 {
708 	pthread_t self;
709 	int doreclaim = 0;
710 	char *name = NULL;
711 
712 	self = pthread__self();
713 
714 	if (pthread__find(self, thread) != 0)
715 		return ESRCH;
716 
717 	if (thread->pt_magic != PT_MAGIC)
718 		return EINVAL;
719 
720 	pthread_spinlock(self, &thread->pt_flaglock);
721 	pthread_spinlock(self, &thread->pt_join_lock);
722 
723 	if (thread->pt_flags & PT_FLAG_DETACHED) {
724 		pthread_spinunlock(self, &thread->pt_join_lock);
725 		pthread_spinunlock(self, &thread->pt_flaglock);
726 		return EINVAL;
727 	}
728 
729 	thread->pt_flags |= PT_FLAG_DETACHED;
730 
731 	/* Any joiners have to be punted now. */
732 	pthread__sched_sleepers(self, &thread->pt_joiners);
733 
734 	if (thread->pt_state == PT_STATE_ZOMBIE) {
735 		thread->pt_state = PT_STATE_DEAD;
736 		name = thread->pt_name;
737 		thread->pt_name = NULL;
738 		doreclaim = 1;
739 	}
740 
741 	pthread_spinunlock(self, &thread->pt_join_lock);
742 	pthread_spinunlock(self, &thread->pt_flaglock);
743 
744 	if (doreclaim) {
745 		pthread__dead(self, thread);
746 		if (name != NULL)
747 			free(name);
748 	}
749 
750 	return 0;
751 }
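
/*
 * Usage sketch (illustrative): fire-and-forget creation. Once detached,
 * the thread's resources are reclaimed when it exits and it must not be
 * joined.
 *
 *	pthread_create(&t, NULL, worker, NULL);
 *	pthread_detach(t);
 *
 * Detaching an already-zombie thread reclaims it immediately, as the
 * doreclaim path above shows.
 */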
752 
753 
754 static void
755 pthread__dead(pthread_t self, pthread_t thread)
756 {
757 
758 	SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread));
759 	pthread__assert(thread != self);
760 	pthread__assert(thread->pt_state == PT_STATE_DEAD);
761 	pthread__assert(thread->pt_name == NULL);
762 
763 	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
764 	pthread_spinlock(self, &pthread__allqueue_lock);
765 	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
766 	pthread_spinunlock(self, &pthread__allqueue_lock);
767 
768 	pthread_spinlock(self, &pthread__deadqueue_lock);
769 	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
770 	pthread_spinunlock(self, &pthread__deadqueue_lock);
771 }
772 
773 
774 int
775 pthread_getname_np(pthread_t thread, char *name, size_t len)
776 {
777 	pthread_t self;
778 
779 	self = pthread__self();
780 
781 	if (pthread__find(self, thread) != 0)
782 		return ESRCH;
783 
784 	if (thread->pt_magic != PT_MAGIC)
785 		return EINVAL;
786 
787 	pthread_spinlock(self, &thread->pt_join_lock);
788 	if (thread->pt_name == NULL)
789 		name[0] = '\0';
790 	else
791 		strlcpy(name, thread->pt_name, len);
792 	pthread_spinunlock(self, &thread->pt_join_lock);
793 
794 	return 0;
795 }
796 
797 
798 int
799 pthread_setname_np(pthread_t thread, const char *name, void *arg)
800 {
801 	pthread_t self = pthread_self();
802 	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
803 	int namelen;
804 
805 	if (pthread__find(self, thread) != 0)
806 		return ESRCH;
807 
808 	if (thread->pt_magic != PT_MAGIC)
809 		return EINVAL;
810 
811 	namelen = snprintf(newname, sizeof(newname), name, arg);
812 	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
813 		return EINVAL;
814 
815 	cp = strdup(newname);
816 	if (cp == NULL)
817 		return ENOMEM;
818 
819 	pthread_spinlock(self, &thread->pt_join_lock);
820 
821 	if (thread->pt_state == PT_STATE_DEAD) {
822 		pthread_spinunlock(self, &thread->pt_join_lock);
823 		free(cp);
824 		return EINVAL;
825 	}
826 
827 	oldname = thread->pt_name;
828 	thread->pt_name = cp;
829 
830 	pthread_spinunlock(self, &thread->pt_join_lock);
831 
832 	if (oldname != NULL)
833 		free(oldname);
834 
835 	return 0;
836 }
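
/*
 * Usage sketch (illustrative): the name argument is a printf(3)-style
 * format and arg is its single parameter, so per-instance names come
 * cheap; the "%s" payload here is hypothetical.
 *
 *	pthread_setname_np(t, "worker/%s", __UNCONST("io"));
 *
 * A name that would expand to PTHREAD_MAX_NAMELEN_NP bytes or more is
 * rejected with EINVAL rather than silently truncated.
 */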
837 
838 
839 
840 /*
841  * XXX There should be a way for applications to use the efficient
842  * inline version, but there are opacity/namespace issues.
843  */
844 pthread_t
845 pthread_self(void)
846 {
847 
848 	return pthread__self();
849 }
850 
851 
852 int
853 pthread_cancel(pthread_t thread)
854 {
855 	pthread_t self;
856 
857 	if (!(thread->pt_state == PT_STATE_RUNNING ||
858 	    thread->pt_state == PT_STATE_RUNNABLE ||
859 	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
860 		return ESRCH;
861 
862 	self = pthread__self();
863 
864 	pthread_spinlock(self, &thread->pt_flaglock);
865 	thread->pt_flags |= PT_FLAG_CS_PENDING;
866 	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
867 		thread->pt_cancel = 1;
868 		pthread_spinunlock(self, &thread->pt_flaglock);
869 		pthread_spinlock(self, &thread->pt_statelock);
870 		if (thread->pt_blockgen != thread->pt_unblockgen) {
871 			/*
872 			 * It's sleeping in the kernel. If we can wake
873 			 * it up, it will notice the cancellation when
874 			 * it returns. If it doesn't wake up when we
875 			 * make this call, then it's blocked
876 			 * uninterruptibly in the kernel, and there's
877 			 * not much to be done about it.
878 			 */
879 			_lwp_wakeup(thread->pt_blockedlwp);
880 		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
881 			/*
882 			 * We're blocked somewhere (pthread__block()
883 			 * was called). Cause it to wake up; it will
884 			 * check for the cancellation if the routine
885 			 * is a cancellation point, and loop and reblock
886 			 * otherwise.
887 			 */
888 			pthread_spinlock(self, thread->pt_sleeplock);
889 			PTQ_REMOVE(thread->pt_sleepq, thread,
890 			    pt_sleep);
891 			pthread_spinunlock(self, thread->pt_sleeplock);
892 			pthread__sched(self, thread);
893 		} else {
894 			/*
895 			 * Nothing. The target thread is running and will
896 			 * notice at the next deferred cancellation point.
897 			 */
898 		}
899 		pthread_spinunlock(self, &thread->pt_statelock);
900 	} else
901 		pthread_spinunlock(self, &thread->pt_flaglock);
902 
903 	return 0;
904 }
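
/*
 * Usage sketch (illustrative): requesting cancellation and observing it
 * through pthread_join(); t is assumed to reach a cancellation point.
 *
 *	void *res;
 *	pthread_cancel(t);
 *	pthread_join(t, &res);
 *	if (res == PTHREAD_CANCELED)
 *		... t honored the request ...
 */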
905 
906 
907 int
908 pthread_setcancelstate(int state, int *oldstate)
909 {
910 	pthread_t self;
911 	int retval;
912 
913 	self = pthread__self();
914 	retval = 0;
915 
916 	pthread_spinlock(self, &self->pt_flaglock);
917 	if (oldstate != NULL) {
918 		if (self->pt_flags & PT_FLAG_CS_DISABLED)
919 			*oldstate = PTHREAD_CANCEL_DISABLE;
920 		else
921 			*oldstate = PTHREAD_CANCEL_ENABLE;
922 	}
923 
924 	if (state == PTHREAD_CANCEL_DISABLE) {
925 		self->pt_flags |= PT_FLAG_CS_DISABLED;
926 		if (self->pt_cancel) {
927 			self->pt_flags |= PT_FLAG_CS_PENDING;
928 			self->pt_cancel = 0;
929 		}
930 	} else if (state == PTHREAD_CANCEL_ENABLE) {
931 		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
932 		/*
933 		 * If a cancellation was requested while cancellation
934 		 * was disabled, note that fact for future
935 		 * cancellation tests.
936 		 */
937 		if (self->pt_flags & PT_FLAG_CS_PENDING) {
938 			self->pt_cancel = 1;
939 			/* This is not a deferred cancellation point. */
940 			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
941 				pthread_spinunlock(self, &self->pt_flaglock);
942 				pthread_exit(PTHREAD_CANCELED);
943 			}
944 		}
945 	} else
946 		retval = EINVAL;
947 
948 	pthread_spinunlock(self, &self->pt_flaglock);
949 	return retval;
950 }
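
/*
 * Usage sketch (illustrative): shielding a non-cancellation-safe region,
 * then restoring the caller's previous state.
 *
 *	int ocs;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
 *	... code that must not be torn by cancellation ...
 *	pthread_setcancelstate(ocs, NULL);
 *
 * A request arriving while disabled is remembered via PT_FLAG_CS_PENDING
 * and re-armed when cancellation is re-enabled, as the code above shows.
 */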
951 
952 
953 int
954 pthread_setcanceltype(int type, int *oldtype)
955 {
956 	pthread_t self;
957 	int retval;
958 
959 	self = pthread__self();
960 	retval = 0;
961 
962 	pthread_spinlock(self, &self->pt_flaglock);
963 
964 	if (oldtype != NULL) {
965 		if (self->pt_flags & PT_FLAG_CS_ASYNC)
966 			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
967 		else
968 			*oldtype = PTHREAD_CANCEL_DEFERRED;
969 	}
970 
971 	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
972 		self->pt_flags |= PT_FLAG_CS_ASYNC;
973 		if (self->pt_cancel) {
974 			pthread_spinunlock(self, &self->pt_flaglock);
975 			pthread_exit(PTHREAD_CANCELED);
976 		}
977 	} else if (type == PTHREAD_CANCEL_DEFERRED)
978 		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
979 	else
980 		retval = EINVAL;
981 
982 	pthread_spinunlock(self, &self->pt_flaglock);
983 	return retval;
984 }
985 
986 
987 void
988 pthread_testcancel(void)
989 {
990 	pthread_t self;
991 
992 	self = pthread__self();
993 	if (self->pt_cancel)
994 		pthread_exit(PTHREAD_CANCELED);
995 }
996 
997 
998 /*
999  * POSIX requires that certain functions return an error rather than
1000  * invoking undefined behavior even when handed completely bogus
1001  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
1002  * utility routine searches the list of threads for the pthread_t
1003  * value without dereferencing it.
1004  */
1005 int
1006 pthread__find(pthread_t self, pthread_t id)
1007 {
1008 	pthread_t target;
1009 
1010 	pthread_spinlock(self, &pthread__allqueue_lock);
1011 	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
1012 	    if (target == id)
1013 		    break;
1014 	pthread_spinunlock(self, &pthread__allqueue_lock);
1015 
1016 	if (target == NULL)
1017 		return ESRCH;
1018 
1019 	return 0;
1020 }
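
/*
 * Usage sketch (illustrative): this lookup is why a stale or invented
 * handle fails cleanly instead of crashing.
 *
 *	pthread_t bogus = (pthread_t)666;
 *	int error = pthread_join(bogus, NULL);
 *	... error == ESRCH ...
 */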
1021 
1022 
1023 void
1024 pthread__testcancel(pthread_t self)
1025 {
1026 
1027 	if (self->pt_cancel)
1028 		pthread_exit(PTHREAD_CANCELED);
1029 }
1030 
1031 
1032 void
1033 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
1034 {
1035 	pthread_t self;
1036 	struct pt_clean_t *entry;
1037 
1038 	self = pthread__self();
1039 	entry = store;
1040 	entry->ptc_cleanup = cleanup;
1041 	entry->ptc_arg = arg;
1042 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
1043 }
1044 
1045 
1046 void
1047 pthread__cleanup_pop(int ex, void *store)
1048 {
1049 	pthread_t self;
1050 	struct pt_clean_t *entry;
1051 
1052 	self = pthread__self();
1053 	entry = store;
1054 
1055 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
1056 	if (ex)
1057 		(*entry->ptc_cleanup)(entry->ptc_arg);
1058 }
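
/*
 * Usage sketch (illustrative): these two functions back the standard
 * pthread_cleanup_push()/pthread_cleanup_pop() macros. A typical use
 * keeps a mutex from being orphaned by cancellation; unlock_cb() is a
 * hypothetical handler.
 *
 *	static void unlock_cb(void *p) { pthread_mutex_unlock(p); }
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_cb, &m);
 *	... code containing cancellation points ...
 *	pthread_cleanup_pop(1);		(1 = run the handler: unlock m)
 */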
1059 
1060 
1061 int *
1062 pthread__errno(void)
1063 {
1064 	pthread_t self;
1065 
1066 	self = pthread__self();
1067 
1068 	return &(self->pt_errno);
1069 }
1070 
1071 ssize_t	_sys_write(int, const void *, size_t);
1072 
1073 void
1074 pthread__assertfunc(const char *file, int line, const char *function,
1075 		    const char *expr)
1076 {
1077 	char buf[1024];
1078 	int len;
1079 
1080 	/*
1081 	 * snprintf should not acquire any locks, or we could
1082 	 * end up deadlocked if the assert caller held locks.
1083 	 */
1084 	len = snprintf(buf, 1024,
1085 	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
1086 	    expr, file, line,
1087 	    function ? ", function \"" : "",
1088 	    function ? function : "",
1089 	    function ? "\"" : "");
1090 
1091 	_sys_write(STDERR_FILENO, buf, (size_t)len);
1092 	(void)kill(getpid(), SIGABRT);
1093 
1094 	_exit(1);
1095 }
1096 
1097 
1098 void
1099 pthread__errorfunc(const char *file, int line, const char *function,
1100 		   const char *msg)
1101 {
1102 	char buf[1024];
1103 	size_t len;
1104 
1105 	if (pthread__diagassert == 0)
1106 		return;
1107 
1108 	/*
1109 	 * snprintf should not acquire any locks, or we could
1110 	 * end up deadlocked if the assert caller held locks.
1111 	 */
1112 	len = snprintf(buf, 1024,
1113 	    "%s: Error detected by libpthread: %s.\n"
1114 	    "Detected by file \"%s\", line %d%s%s%s.\n"
1115 	    "See pthread(3) for information.\n",
1116 	    getprogname(), msg, file, line,
1117 	    function ? ", function \"" : "",
1118 	    function ? function : "",
1119 	    function ? "\"" : "");
1120 
1121 	if (pthread__diagassert & DIAGASSERT_STDERR)
1122 		_sys_write(STDERR_FILENO, buf, len);
1123 
1124 	if (pthread__diagassert & DIAGASSERT_SYSLOG)
1125 		syslog(LOG_DEBUG | LOG_USER, "%s", buf);
1126 
1127 	if (pthread__diagassert & DIAGASSERT_ABORT) {
1128 		(void)kill(getpid(), SIGABRT);
1129 		_exit(1);
1130 	}
1131 }
1132