xref: /netbsd-src/lib/libpthread/pthread.c (revision 21e37cc72a480a47828990a439cde7ac9ffaf0c6)
1 /*	$NetBSD: pthread.c,v 1.34 2004/06/25 16:33:32 drochner Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread.c,v 1.34 2004/06/25 16:33:32 drochner Exp $");
41 
42 #include <err.h>
43 #include <errno.h>
44 #include <lwp.h>
45 #include <signal.h>
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <string.h>
49 #include <syslog.h>
50 #include <ucontext.h>
51 #include <unistd.h>
52 #include <sys/param.h>
53 #include <sys/sysctl.h>
54 
55 #include <sched.h>
56 #include "pthread.h"
57 #include "pthread_int.h"
58 
59 #ifdef PTHREAD_MAIN_DEBUG
60 #define SDPRINTF(x) DPRINTF(x)
61 #else
62 #define SDPRINTF(x)
63 #endif
64 
65 static void	pthread__create_tramp(void *(*start)(void *), void *arg);
66 
67 int pthread__started;
68 
69 pthread_spin_t pthread__allqueue_lock;
70 struct pthread_queue_t pthread__allqueue;
71 
72 pthread_spin_t pthread__deadqueue_lock;
73 struct pthread_queue_t pthread__deadqueue;
74 struct pthread_queue_t *pthread__reidlequeue;
75 
76 static int nthreads;
77 static int nextthread;
78 static pthread_spin_t nextthread_lock;
79 static pthread_attr_t pthread_default_attr;
80 
81 enum {
82 	DIAGASSERT_ABORT =	1<<0,
83 	DIAGASSERT_STDERR =	1<<1,
84 	DIAGASSERT_SYSLOG =	1<<2
85 };
86 
87 static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;
88 
89 pthread_spin_t pthread__runqueue_lock;
90 struct pthread_queue_t pthread__runqueue;
91 struct pthread_queue_t pthread__idlequeue;
92 struct pthread_queue_t pthread__suspqueue;
93 
94 int pthread__concurrency, pthread__maxconcurrency;
95 
96 __strong_alias(__libc_thr_self,pthread_self)
97 __strong_alias(__libc_thr_create,pthread_create)
98 __strong_alias(__libc_thr_exit,pthread_exit)
99 __strong_alias(__libc_thr_errno,pthread__errno)
100 __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
101 
102 /*
103  * Static library kludge.  Place a reference to a symbol from any
104  * library file which does not already have a reference here.
105  */
106 extern int pthread__cancel_stub_binder;
107 extern int pthread__sched_binder;
108 extern struct pthread_queue_t pthread__nanosleeping;
109 
110 void *pthread__static_lib_binder[] = {
111 	&pthread__cancel_stub_binder,
112 	pthread_cond_init,
113 	pthread_mutex_init,
114 	pthread_rwlock_init,
115 	pthread_barrier_init,
116 	pthread_key_create,
117 	pthread_setspecific,
118 	&pthread__sched_binder,
119 	&pthread__nanosleeping
120 };
121 
122 /*
123  * This needs to be started by the library loading code, before main()
124  * gets to run, for various things that use the state of the initial thread
125  * to work properly (thread-specific data is an application-visible example;
126  * spinlock counts for mutexes is an internal example).
127  */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2], ncpu;
	size_t len;
	extern int __isthreaded;

	/*
	 * Determine the number of CPUs.  Fall back to one CPU if the
	 * sysctl fails: the original code ignored sysctl()'s return
	 * value, leaving ncpu uninitialized on failure.
	 */
	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1 || ncpu < 1)
		ncpu = 1;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init(ncpu);

	/*
	 * Find out requested/possible concurrency.  The environment
	 * override is clamped to the range [1, ncpu].
	 */
	pthread__maxconcurrency = 1;
	p = getenv("PTHREAD_CONCURRENCY");
	if (p)
		pthread__maxconcurrency = atoi(p);
	if (pthread__maxconcurrency < 1)
		pthread__maxconcurrency = 1;
	if (pthread__maxconcurrency > ncpu)
		pthread__maxconcurrency = ncpu;

	/* Allocate one re-idle queue per virtual processor. */
	pthread__reidlequeue = (struct pthread_queue_t *)malloc
		(pthread__maxconcurrency * sizeof(struct pthread_queue_t));
	if (pthread__reidlequeue == NULL)
		err(1, "Couldn't allocate memory for pthread__reidlequeue");

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	for (i = 0; i < pthread__maxconcurrency; i++)
		PTQ_INIT(&pthread__reidlequeue[i]);
	nthreads = 1;

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* Inherit the process signal mask into the initial thread. */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init(ncpu);
#endif

	/*
	 * Parse PTHREAD_DIAGASSERT: lowercase letters enable a
	 * reporting mode (abort/stderr/syslog), uppercase disable it.
	 */
	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}


	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
213 
214 static void
215 pthread__child_callback(void)
216 {
217 	/*
218 	 * Clean up data structures that a forked child process might
219 	 * trip over. Note that if threads have been created (causing
220 	 * this handler to be registered) the standards say that the
221 	 * child will trigger undefined behavior if it makes any
222 	 * pthread_* calls (or any other calls that aren't
223 	 * async-signal-safe), so we don't really have to clean up
224 	 * much. Anything that permits some pthread_* calls to work is
225 	 * merely being polite.
226 	 */
227 	pthread__started = 0;
228 }
229 
/*
 * Lazy bring-up of the threading machinery, called from the first
 * pthread_create().  Starts per-process timers, registers the fork
 * handler, creates the idle threads, and kicks off the SA subsystem.
 */
static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/*
	 * Create idle threads
	 * XXX need to create more idle threads if concurrency > 3
	 */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		/* Idle threads run with all signals blocked. */
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		/* Park it on the idle queue rather than the run queue. */
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}
266 
267 
268 /* General-purpose thread data structure sanitization. */
269 void
270 pthread__initthread(pthread_t self, pthread_t t)
271 {
272 	int id;
273 
274 	pthread_spinlock(self, &nextthread_lock);
275 	id = nextthread;
276 	nextthread++;
277 	pthread_spinunlock(self, &nextthread_lock);
278 	t->pt_num = id;
279 
280 	t->pt_magic = PT_MAGIC;
281 	t->pt_type = PT_THREAD_NORMAL;
282 	t->pt_state = PT_STATE_RUNNABLE;
283 	pthread_lockinit(&t->pt_statelock);
284 	pthread_lockinit(&t->pt_flaglock);
285 	t->pt_spinlocks = 0;
286 	t->pt_next = NULL;
287 	t->pt_exitval = NULL;
288 	t->pt_flags = 0;
289 	t->pt_cancel = 0;
290 	t->pt_errno = 0;
291 	t->pt_parent = NULL;
292 	t->pt_heldlock = NULL;
293 	t->pt_switchto = NULL;
294 	t->pt_trapuc = NULL;
295 	sigemptyset(&t->pt_siglist);
296 	sigemptyset(&t->pt_sigmask);
297 	pthread_lockinit(&t->pt_siglock);
298 	PTQ_INIT(&t->pt_joiners);
299 	pthread_lockinit(&t->pt_join_lock);
300 	PTQ_INIT(&t->pt_cleanup_stack);
301 	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
302 	t->pt_name = NULL;
303 #ifdef PTHREAD__DEBUG
304 	t->blocks = 0;
305 	t->preempts = 0;
306 	t->rescheds = 0;
307 #endif
308 }
309 
310 
/*
 * Create a new thread running startfunc(arg), honoring the given
 * attributes (or the defaults if attr is NULL).  Returns 0 and stores
 * the handle through *thread, or an errno value on failure.
 */
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	/* Validate the attribute object via its magic number. */
	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	/*
	 * 1. Get a thread structure: recycle one from the dead queue
	 * if possible, otherwise allocate a fresh stack + pthread_st.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	/* New threads inherit the creator's signal mask. */
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
	/* 6. Put on appropriate queue. */
	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
		/* Created suspended: park it until pthread_resume_np(). */
		pthread_spinlock(self, &newthread->pt_statelock);
		pthread__suspend(self, newthread);
		pthread_spinunlock(self, &newthread->pt_statelock);
	} else
		pthread__sched(self, newthread);

	*thread = newthread;

	return 0;
}
406 
407 
/*
 * Entry trampoline for newly created threads: invoke the user's start
 * routine and feed its return value to pthread_exit().
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{

	pthread_exit((*start)(arg));

	/* pthread_exit() does not return. */
	/*NOTREACHED*/
	pthread__abort();
}
420 
/*
 * Suspend another thread.  Pulls the target off whatever queue it is
 * on (run queue or a sleep queue) and parks it on the suspend queue.
 * A thread blocked in the kernel is merely flagged; the suspension
 * takes effect when it unblocks.  Suspending oneself is refused.
 */
int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self = pthread__self();
	if (self == thread) {
		fprintf(stderr, "suspend_np: can't suspend self\n");
		return EDEADLK;
	}
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	if (thread->pt_blockgen != thread->pt_unblockgen) {
		/* Blocked in the kernel; defer via the flag. */
		/* XXX flaglock? */
		thread->pt_flags |= PT_FLAG_SUSPENDED;
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	}
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread__abort();	/* XXX */
		break;
	case PT_STATE_SUSPENDED:
		/* Already suspended; nothing to do. */
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	case PT_STATE_RUNNABLE:
		/* Remove from the run queue before parking it. */
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		break;
	case PT_STATE_BLOCKED_QUEUE:
		/* Remove from whatever sleep queue it is waiting on. */
		pthread_spinlock(self, thread->pt_sleeplock);
		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
		pthread_spinunlock(self, thread->pt_sleeplock);
		break;
	default:
		break;			/* XXX */
	}
	pthread__suspend(self, thread);
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}
462 
/*
 * Resume a thread previously suspended with pthread_suspend_np() (or
 * created suspended).  Clears the suspend flag; if the thread is on
 * the suspend queue, move it back to the scheduler.  Note the suspend
 * queue is protected by pthread__runqueue_lock.
 */
int
pthread_resume_np(pthread_t thread)
{

	pthread_t self = pthread__self();
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	/* XXX flaglock? */
	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
	if (thread->pt_state == PT_STATE_SUSPENDED) {
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		pthread__sched(self, thread);
	}
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}
482 
483 
484 /*
485  * Other threads will switch to the idle thread so that they
486  * can dispose of any awkward locks or recycle upcall state.
487  */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	/* Queue ourselves for re-idling on this virtual processor. */
	PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq);
	pthread__concurrency--;
	SDPRINTF(("(yield %p concurrency) now %d\n", self,
		     pthread__concurrency));
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}
524 
525 
/*
 * Terminate the calling thread: run cleanup handlers and TSD
 * destructors, record the exit value, then either recycle immediately
 * (detached) or become a zombie awaiting a joiner.  If this was the
 * last thread, the process exits.  Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt, flags;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting (status %p, flags %x, cancel %d).\n", self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	flags = self->pt_flags;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	if (flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us, so free the name now. */
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		/* Switch away for good, releasing the deadqueue lock. */
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &self->pt_join_lock);
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
603 
604 
/*
 * Wait for the given thread to terminate and collect its exit value.
 * Loops sleeping on the target's joiner queue until it becomes a
 * zombie; detects racing joiners/detachers via the DEAD/DETACHED
 * checks and the generation number (pt_num) check.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	/* Snapshot the thread number to detect recycling later. */
	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			/* Join is a cancellation point. */
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		/* Record where we sleep so others can wake/cancel us. */
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		/* Sleep; pthread__block() releases the join lock. */
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/* The joiner frees the name (see pthread_exit()). */
	if (name != NULL)
		free(name);

	return 0;
}
696 
697 
698 int
699 pthread_equal(pthread_t t1, pthread_t t2)
700 {
701 
702 	/* Nothing special here. */
703 	return (t1 == t2);
704 }
705 
706 
707 int
708 pthread_detach(pthread_t thread)
709 {
710 	pthread_t self;
711 
712 	self = pthread__self();
713 
714 	if (pthread__find(self, thread) != 0)
715 		return ESRCH;
716 
717 	if (thread->pt_magic != PT_MAGIC)
718 		return EINVAL;
719 
720 	pthread_spinlock(self, &thread->pt_flaglock);
721 	pthread_spinlock(self, &thread->pt_join_lock);
722 
723 	if (thread->pt_flags & PT_FLAG_DETACHED) {
724 		pthread_spinunlock(self, &thread->pt_join_lock);
725 		pthread_spinunlock(self, &thread->pt_flaglock);
726 		return EINVAL;
727 	}
728 
729 	thread->pt_flags |= PT_FLAG_DETACHED;
730 
731 	/* Any joiners have to be punted now. */
732 	pthread__sched_sleepers(self, &thread->pt_joiners);
733 
734 	pthread_spinunlock(self, &thread->pt_join_lock);
735 	pthread_spinunlock(self, &thread->pt_flaglock);
736 
737 	return 0;
738 }
739 
740 
741 int
742 pthread_getname_np(pthread_t thread, char *name, size_t len)
743 {
744 	pthread_t self;
745 
746 	self = pthread__self();
747 
748 	if (pthread__find(self, thread) != 0)
749 		return ESRCH;
750 
751 	if (thread->pt_magic != PT_MAGIC)
752 		return EINVAL;
753 
754 	pthread_spinlock(self, &thread->pt_join_lock);
755 	if (thread->pt_name == NULL)
756 		name[0] = '\0';
757 	else
758 		strlcpy(name, thread->pt_name, len);
759 	pthread_spinunlock(self, &thread->pt_join_lock);
760 
761 	return 0;
762 }
763 
764 
/*
 * Set the target thread's name.  Per the NetBSD np interface, "name"
 * is a printf-style format and "arg" its single argument; the result
 * must fit in PTHREAD_MAX_NAMELEN_NP bytes.  The old name, if any, is
 * freed after the swap.
 */
int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self = pthread_self();
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_state == PT_STATE_DEAD) {
		/* Thread already gone; don't leak the new name. */
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	/* Free the old name outside the lock. */
	if (oldname != NULL)
		free(oldname);

	return 0;
}
804 
805 
806 
807 /*
808  * XXX There should be a way for applications to use the efficient
809  *  inline version, but there are opacity/namespace issues.
810  */
811 pthread_t
812 pthread_self(void)
813 {
814 
815 	return pthread__self();
816 }
817 
818 
/*
 * Request cancellation of another thread.  If cancellation is enabled
 * on the target, mark it cancelled and wake it from a kernel block or
 * a userlevel sleep queue so it notices; otherwise just record the
 * request in PT_FLAG_CS_PENDING for when cancellation is re-enabled.
 */
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	/* Unlocked state peek; only roughly validates the target. */
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
		return ESRCH;

	self = pthread__self();

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_blockgen != thread->pt_unblockgen) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}
872 
873 
/*
 * Enable or disable cancellation for the calling thread, optionally
 * returning the previous state.  Re-enabling with a request pending
 * re-arms pt_cancel, and exits immediately in async-cancel mode.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			/* Park an already-delivered request as pending. */
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
918 
919 
/*
 * Select deferred or asynchronous cancellation for the calling thread,
 * optionally returning the previous type.  Switching to asynchronous
 * with a cancel already delivered exits immediately.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			/* Async mode: act on the pending cancel now. */
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
952 
953 
954 void
955 pthread_testcancel()
956 {
957 	pthread_t self;
958 
959 	self = pthread__self();
960 	if (self->pt_cancel)
961 		pthread_exit(PTHREAD_CANCELED);
962 }
963 
964 
965 /*
966  * POSIX requires that certain functions return an error rather than
967  * invoking undefined behavior even when handed completely bogus
968  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
969  * utility routine searches the list of threads for the pthread_t
970  * value without dereferencing it.
971  */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	/* Linear scan of all live threads, comparing pointers only. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	/* PTQ_FOREACH leaves target NULL when the scan finds nothing. */
	if (target == NULL)
		return ESRCH;

	return 0;
}
988 
989 
990 void
991 pthread__testcancel(pthread_t self)
992 {
993 
994 	if (self->pt_cancel)
995 		pthread_exit(PTHREAD_CANCELED);
996 }
997 
998 
999 void
1000 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
1001 {
1002 	pthread_t self;
1003 	struct pt_clean_t *entry;
1004 
1005 	self = pthread__self();
1006 	entry = store;
1007 	entry->ptc_cleanup = cleanup;
1008 	entry->ptc_arg = arg;
1009 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
1010 }
1011 
1012 
1013 void
1014 pthread__cleanup_pop(int ex, void *store)
1015 {
1016 	pthread_t self;
1017 	struct pt_clean_t *entry;
1018 
1019 	self = pthread__self();
1020 	entry = store;
1021 
1022 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
1023 	if (ex)
1024 		(*entry->ptc_cleanup)(entry->ptc_arg);
1025 }
1026 
1027 
1028 int *
1029 pthread__errno(void)
1030 {
1031 	pthread_t self;
1032 
1033 	self = pthread__self();
1034 
1035 	return &(self->pt_errno);
1036 }
1037 
1038 ssize_t	_sys_write(int, const void *, size_t);
1039 
/*
 * Report a failed internal assertion and abort.  Deliberately avoids
 * stdio locking by formatting into a local buffer and using the raw
 * _sys_write() syscall stub.  Never returns.
 */
void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/*
	 * snprintf() returns the untruncated length (or a negative
	 * value on error); clamp to what actually landed in buf so a
	 * truncated message can't make us write stack garbage.
	 */
	if (len < 0)
		len = 0;
	else if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}
1063 
1064 
1065 void
1066 pthread__errorfunc(const char *file, int line, const char *function,
1067 		   const char *msg)
1068 {
1069 	char buf[1024];
1070 	size_t len;
1071 
1072 	if (pthread__diagassert == 0)
1073 		return;
1074 
1075 	/*
1076 	 * snprintf should not acquire any locks, or we could
1077 	 * end up deadlocked if the assert caller held locks.
1078 	 */
1079 	len = snprintf(buf, 1024,
1080 	    "%s: Error detected by libpthread: %s.\n"
1081 	    "Detected by file \"%s\", line %d%s%s%s.\n"
1082 	    "See pthread(3) for information.\n",
1083 	    getprogname(), msg, file, line,
1084 	    function ? ", function \"" : "",
1085 	    function ? function : "",
1086 	    function ? "\"" : "");
1087 
1088 	if (pthread__diagassert & DIAGASSERT_STDERR)
1089 		_sys_write(STDERR_FILENO, buf, len);
1090 
1091 	if (pthread__diagassert & DIAGASSERT_SYSLOG)
1092 		syslog(LOG_DEBUG | LOG_USER, "%s", buf);
1093 
1094 	if (pthread__diagassert & DIAGASSERT_ABORT) {
1095 		(void)kill(getpid(), SIGABRT);
1096 		_exit(1);
1097 	}
1098 }
1099