/*	$NetBSD: pthread.c,v 1.46 2005/10/19 02:44:45 chs Exp $	*/

/*-
 * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.46 2005/10/19 02:44:45 chs Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#ifdef PTHREAD_MLOCK_KLUDGE
#include <sys/mman.h>
#endif

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

static void	pthread__create_tramp(void *(*start)(void *), void *arg);
static void	pthread__dead(pthread_t, pthread_t);

int pthread__started;

pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__allqueue;

pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t *pthread__reidlequeue;

static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED;
static pthread_attr_t pthread_default_attr;

enum {
	DIAGASSERT_ABORT =	1<<0,
	DIAGASSERT_STDERR =	1<<1,
	DIAGASSERT_SYSLOG =	1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

pthread_spin_t pthread__runqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;
struct pthread_queue_t pthread__suspqueue;

int pthread__concurrency, pthread__maxconcurrency;

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)

/*
 * Static library kludge.  Place a reference to a symbol in any library
 * file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;
extern int pthread__sched_binder;
extern struct pthread_queue_t pthread__nanosleeping;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
	&pthread__sched_binder,
	&pthread__nanosleeping
};
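
/*
 * Illustration of the kludge (a sketch, not library code): with
 * "cc -static main.o -lpthread", the linker extracts only those
 * members of libpthread.a that resolve a currently-undefined symbol.
 * A reference to pthread_create() pulls in this file's object, and
 * the array above then forces the condition variable, mutex, rwlock,
 * barrier, and thread-specific-data objects to be extracted as well,
 * even if the application never calls them directly.
 */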

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes are an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2], ncpu;
	size_t len;
	extern int __isthreaded;
#ifdef PTHREAD_MLOCK_KLUDGE
	int ret;
#endif

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1)
		ncpu = 1;	/* don't use ncpu uninitialized if sysctl fails */

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init(ncpu);

	/* Find out requested/possible concurrency */
	p = getenv("PTHREAD_CONCURRENCY");
	pthread__maxconcurrency = p ? atoi(p) : 1;

	if (pthread__maxconcurrency < 1)
		pthread__maxconcurrency = 1;
	if (pthread__maxconcurrency > ncpu)
		pthread__maxconcurrency = ncpu;

	/* Allocate data structures */
	pthread__reidlequeue = (struct pthread_queue_t *)malloc
		(pthread__maxconcurrency * sizeof(struct pthread_queue_t));
	if (pthread__reidlequeue == NULL)
		err(1, "Couldn't allocate memory for pthread__reidlequeue");

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
#ifdef PTHREAD_MLOCK_KLUDGE
	ret = mlock(&pthread__deadqueue, sizeof(pthread__deadqueue));
	pthread__assert(ret == 0);
#endif
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	for (i = 0; i < pthread__maxconcurrency; i++)
		PTQ_INIT(&pthread__reidlequeue[i]);
	nthreads = 1;

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init(ncpu);
#endif

	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
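
/*
 * Example (a sketch, not library code): the two environment variables
 * read above can be set when starting a threaded program, e.g. from
 * sh(1):
 *
 *	$ PTHREAD_CONCURRENCY=2 PTHREAD_DIAGASSERT=aEl ./prog
 *
 * which requests two virtual processors and, for error reporting,
 * aborts on error ('a'), suppresses the stderr message ('E'), and
 * logs to syslog ('l').
 */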

static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread__signal_start();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/*
	 * Create idle threads
	 * XXX need to create more idle threads if concurrency > 3
	 */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}


/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

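	/*
	 * 1. Get a thread structure: recycle a dead thread if one is
	 * available, otherwise allocate a fresh stack below.
	 */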
	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
#ifdef PTHREAD_MLOCK_KLUDGE
		ret = mlock(newthread, sizeof(struct __pthread_st));
		if (ret < 0) {
			if (name)
				free(name);	/* don't leak the name */
			return EAGAIN;
		}
#endif
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) new thread %p (name pointer %p).\n",
		  self, newthread, newthread->pt_name));
	/* 6. Put on appropriate queue. */
	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
		pthread_spinlock(self, &newthread->pt_statelock);
		pthread__suspend(self, newthread);
		pthread_spinunlock(self, &newthread->pt_statelock);
	} else
		pthread__sched(self, newthread);

	*thread = newthread;

	return 0;
}
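
/*
 * Example (a sketch of caller-side code, not part of the library):
 * creating a joinable thread and checking the error-number return
 * convention used above; workfn and workarg are placeholders.
 *
 *	void *workfn(void *);
 *	pthread_t t;
 *	int error;
 *
 *	error = pthread_create(&t, NULL, workfn, workarg);
 *	if (error != 0)
 *		errx(1, "pthread_create: %s", strerror(error));
 */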


static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	if (thread->pt_blockgen != thread->pt_unblockgen) {
		/* XXX flaglock? */
		thread->pt_flags |= PT_FLAG_SUSPENDED;
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	}
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread__abort();	/* XXX */
		break;
	case PT_STATE_SUSPENDED:
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	case PT_STATE_RUNNABLE:
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		break;
	case PT_STATE_BLOCKED_QUEUE:
		pthread_spinlock(self, thread->pt_sleeplock);
		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
		pthread_spinunlock(self, thread->pt_sleeplock);
		break;
	case PT_STATE_ZOMBIE:
		goto out;
	default:
		break;			/* XXX */
	}
	pthread__suspend(self, thread);

out:
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}

int
pthread_resume_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	/* XXX flaglock? */
	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
	if (thread->pt_state == PT_STATE_SUSPENDED) {
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		pthread__sched(self, thread);
	}
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}


/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq);
	pthread__concurrency--;
	SDPRINTF(("(yield %p concurrency) now %d\n", self,
		     pthread__concurrency));
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}


void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) status %p, flags %x, cancel %d\n",
		  self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	/*
	 * it's safe to check PT_FLAG_DETACHED without pt_flaglock
	 * because it's only set by pthread_detach with pt_join_lock held.
	 */
	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &self->pt_join_lock);
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		pthread__block(self, &pthread__deadqueue_lock);
		SDPRINTF(("(pthread_exit %p) walking dead\n", self));
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
		SDPRINTF(("(pthread_exit %p) walking zombie\n", self));
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	pthread__dead(self, thread);

	if (name != NULL)
		free(name);

	return 0;
}
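
/*
 * Example (a sketch of caller-side code): retrieving the value the
 * thread passed to pthread_exit() or returned from its start routine.
 * Only one joiner wins; as the loop above shows, latecomers get ESRCH.
 *
 *	void *result;
 *
 *	if (pthread_join(t, &result) == 0)
 *		printf("thread returned %p\n", result);
 */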


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{
	pthread_t self;
	int doreclaim = 0;
	char *name = NULL;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	if (thread->pt_state == PT_STATE_ZOMBIE) {
		thread->pt_state = PT_STATE_DEAD;
		name = thread->pt_name;
		thread->pt_name = NULL;
		doreclaim = 1;
	}

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (doreclaim) {
		pthread__dead(self, thread);
		if (name != NULL)
			free(name);
	}

	return 0;
}


static void
pthread__dead(pthread_t self, pthread_t thread)
{

	SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread));
	pthread__assert(thread != self);
	pthread__assert(thread->pt_state == PT_STATE_DEAD);
	pthread__assert(thread->pt_name == NULL);

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self;
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	self = pthread__self();
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_state == PT_STATE_DEAD) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
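
/*
 * Example (a sketch of caller-side code): as the snprintf() above
 * shows, the name is a printf(3)-style format and arg is its single
 * argument, so a caller can write:
 *
 *	pthread_setname_np(t, "worker %p", ctx);
 */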


/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
		return ESRCH;

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_blockgen != thread->pt_unblockgen) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptibly in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}
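
/*
 * Example (a sketch of caller-side code): a cancellation request is
 * acted on at the target's next cancellation point; the canceller
 * usually joins to synchronize with the death:
 *
 *	pthread_cancel(t);
 *	pthread_join(t, &result);
 *
 * after which result compares equal to PTHREAD_CANCELED.
 */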


int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
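
/*
 * Example (a sketch of caller-side code): shielding a non-reentrant
 * section from cancellation and then restoring the previous state:
 *
 *	int ocs;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
 *	...work that must not be cancelled...
 *	pthread_setcancelstate(ocs, NULL);
 */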


int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
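
/*
 * Example (a sketch of caller-side code): applications reach the two
 * functions above through the pthread_cleanup_push()/pop() macros,
 * which supply the on-stack storage argument:
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_fn, &m);
 *	...code that may be cancelled...
 *	pthread_cleanup_pop(1);
 *
 * where pop(1) runs unlock_fn(&m) immediately; unlock_fn is a
 * placeholder for a caller-supplied handler.
 */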


int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);

void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	SDPRINTF(("(af)\n"));

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}
1173