/*	$NetBSD: pthread.c,v 1.59 2007/02/09 23:53:24 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2003, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.59 2007/02/09 23:53:24 ad Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#ifdef PTHREAD_MLOCK_KLUDGE
#include <sys/mman.h>
#endif

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* Maximum number of LWPs to unpark in one operation. */
#define	PTHREAD__UNPARK_MAX	128

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		1000

static void	pthread__create_tramp(void *(*start)(void *), void *arg);
static void	pthread__dead(pthread_t, pthread_t);

int pthread__started;

pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__allqueue;

pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t *pthread__reidlequeue;

static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED;
static pthread_attr_t pthread_default_attr;

enum {
	DIAGASSERT_ABORT =	1<<0,
	DIAGASSERT_STDERR =	1<<1,
	DIAGASSERT_SYSLOG =	1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

#ifdef PTHREAD_SA
pthread_spin_t pthread__runqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;
struct pthread_queue_t pthread__suspqueue;
#endif

int pthread__concurrency, pthread__maxconcurrency, pthread__nspins;
int pthread__unpark_max = PTHREAD__UNPARK_MAX;

int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)

/*
 * Static library kludge.  Place a reference to a symbol from any
 * library file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;
#ifdef PTHREAD_SA
extern int pthread__sched_binder;
extern struct pthread_queue_t pthread__nanosleeping;
#endif

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
#ifdef PTHREAD_SA
	&pthread__sched_binder,
	&pthread__nanosleeping
#endif
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2], ncpu;
	size_t len;
	extern int __isthreaded;
#ifdef PTHREAD_MLOCK_KLUDGE
	int ret;
#endif

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	/* Default to one CPU, in case the sysctl fails. */
	ncpu = 1;
	len = sizeof(ncpu);
	sysctl(mib, 2, &ncpu, &len, NULL, 0);

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init(ncpu);

#ifdef PTHREAD_SA
	/* Find out requested/possible concurrency */
	p = getenv("PTHREAD_CONCURRENCY");
	pthread__maxconcurrency = p ? atoi(p) : 1;

	if (pthread__maxconcurrency < 1)
		pthread__maxconcurrency = 1;
	if (pthread__maxconcurrency > ncpu)
		pthread__maxconcurrency = ncpu;

	/* Allocate data structures */
	pthread__reidlequeue = (struct pthread_queue_t *)malloc
		(pthread__maxconcurrency * sizeof(struct pthread_queue_t));
	if (pthread__reidlequeue == NULL)
		err(1, "Couldn't allocate memory for pthread__reidlequeue");

	pthread__nspins = PTHREAD__NSPINS;
#else
	/*
	 * Get number of CPUs, and maximum number of LWPs that can be
	 * unparked at once.
	 */
	if ((pthread__concurrency = ncpu) > 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;
	i = (int)_lwp_unpark_all(NULL, 0, NULL);
	if (i == -1)
		err(1, "_lwp_unpark_all");
	if (i < pthread__unpark_max)
		pthread__unpark_max = i;
#endif

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
#ifdef PTHREAD_MLOCK_KLUDGE
	ret = mlock(&pthread__deadqueue, sizeof(pthread__deadqueue));
	pthread__assert(ret == 0);
#endif
#ifdef PTHREAD_SA
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	for (i = 0; i < pthread__maxconcurrency; i++)
		PTQ_INIT(&pthread__reidlequeue[i]);
#endif
	nthreads = 1;
	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);

	first->pt_state = PT_STATE_RUNNING;
#ifdef PTHREAD_SA
	_sys___sigprocmask14(0, NULL, &first->pt_sigmask);
#else
	first->pt_lid = _lwp_self();
#endif
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
#ifdef PTHREAD_SA
	pthread__signal_init();
#endif
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init(ncpu);
#endif

	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
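
/*
 * Illustrative example (not part of the library): PTHREAD_DIAGASSERT,
 * parsed above, is a string of single-character flags; lower case sets
 * a behaviour and upper case clears it.  A hypothetical invocation that
 * suppresses the abort ('A'), keeps stderr reporting ('e') and adds
 * syslog logging ('l') would look like:
 *
 *	PTHREAD_DIAGASSERT=Ael ./prog
 */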

static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}
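
/*
 * Illustrative sketch (not part of the library): applications can hook
 * fork() the same way pthread__start() does below, typically to keep a
 * lock in a sane state across the fork.  All names here are hypothetical.
 */
#if 0
static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;

static void app_prepare(void) { pthread_mutex_lock(&app_lock); }
static void app_parent(void) { pthread_mutex_unlock(&app_lock); }
static void app_child(void) { pthread_mutex_unlock(&app_lock); }

/* Early in main(): pthread_atfork(app_prepare, app_parent, app_child); */
#endif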

static void
pthread__start(void)
{
	pthread_t self;
#ifdef PTHREAD_SA
	pthread_t idle;
	int i, ret;
#endif

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
#ifdef PTHREAD_SA
	pthread__alarm_init();

	pthread__signal_start();
#endif

	pthread_atfork(NULL, NULL, pthread__child_callback);

#ifdef PTHREAD_SA
	/*
	 * Create idle threads
	 * XXX need to create more idle threads if concurrency > 3
	 */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
#endif

	SDPRINTF(("(pthread__start %p) Started.\n", self));
}


/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;

#ifdef PTHREAD_SA
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	t->pt_heldlock = NULL;
	t->pt_next = NULL;
	t->pt_parent = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
#else
	t->pt_state = PT_STATE_RUNNING;
#endif

	pthread_lockinit(&t->pt_statelock);

	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	memset(&t->pt_specific, 0, sizeof(t->pt_specific));
	t->pt_name = NULL;

#if defined(PTHREAD__DEBUG) && defined(PTHREAD_SA)
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * volatile name;
	int ret;
#ifndef PTHREAD_SA
	int flag;
#endif

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
#ifdef PTHREAD_SA
	newthread->pt_sigmask = self->pt_sigmask;
#endif

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

#ifndef PTHREAD_SA
	/* 5a. Create the new LWP. */
	newthread->pt_sleeponq = 0;
	flag = 0;
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0)
		flag |= LWP_SUSPENDED;
	if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0)
		flag |= LWP_DETACHED;
	ret = _lwp_create(newthread->pt_uc, (u_long)flag, &newthread->pt_lid);
	if (ret != 0) {
		SDPRINTF(("(pthread_create %p) _lwp_create: %s\n",
		    self, strerror(errno)));
		free(name);
		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, newthread, pt_allq);
		nthreads--;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		return ret;
	}
#endif

#ifdef PTHREAD_SA
	SDPRINTF(("(pthread_create %p) new thread %p (name pointer %p).\n",
		  self, newthread, newthread->pt_name));
	/* 6. Put on appropriate queue. */
	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
		pthread_spinlock(self, &newthread->pt_statelock);
		pthread__suspend(self, newthread);
		pthread_spinunlock(self, &newthread->pt_statelock);
	} else
		pthread__sched(self, newthread);
#else
	SDPRINTF(("(pthread_create %p) new thread %p (name %p, lid %d).\n",
		  self, newthread, newthread->pt_name,
		  (int)newthread->pt_lid));
#endif

	*thread = newthread;

	return 0;
}
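
/*
 * Illustrative sketch (not part of the library): the usual application
 * view of the machinery above and of pthread_join() below.  Everything
 * here is standard POSIX; the function names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{

	printf("hello from %s\n", (const char *)arg);
	return arg;			/* becomes the join value */
}

int
main(void)
{
	pthread_t t;
	void *val;

	if (pthread_create(&t, NULL, worker, "worker") != 0)
		return 1;
	if (pthread_join(t, &val) != 0)	/* reaps the thread's resources */
		return 1;
	return 0;
}
#endif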


static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
#ifdef PTHREAD_SA
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	if (thread->pt_blockgen != thread->pt_unblockgen) {
		/* XXX flaglock? */
		thread->pt_flags |= PT_FLAG_SUSPENDED;
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	}
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread__abort();	/* XXX */
		break;
	case PT_STATE_SUSPENDED:
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	case PT_STATE_RUNNABLE:
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		break;
	case PT_STATE_BLOCKED_QUEUE:
		pthread_spinlock(self, thread->pt_sleeplock);
		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
		pthread_spinunlock(self, thread->pt_sleeplock);
		break;
	case PT_STATE_ZOMBIE:
		goto out;
	default:
		break;			/* XXX */
	}
	pthread__suspend(self, thread);

out:
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
#else
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p.\n",
		     self, thread));
	return _lwp_suspend(thread->pt_lid);
#endif
}

int
pthread_resume_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
#ifdef PTHREAD_SA
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	/* XXX flaglock? */
	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
	if (thread->pt_state == PT_STATE_SUSPENDED) {
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		pthread__sched(self, thread);
	}
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
#else
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p.\n",
		     self, thread));
	return _lwp_continue(thread->pt_lid);
#endif
}

#ifdef PTHREAD_SA
/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq);
	pthread__concurrency--;
	SDPRINTF(("(yield %p concurrency) now %d\n", self,
		     pthread__concurrency));
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}
#endif


void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) status %p, flags %x, cancel %d\n",
		  self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	/*
	 * It's safe to check PT_FLAG_DETACHED without pt_flaglock
	 * because it's only set by pthread_detach with pt_join_lock held.
	 */
	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &self->pt_join_lock);
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);

#ifdef PTHREAD_SA
		pthread__block(self, &pthread__deadqueue_lock);
		SDPRINTF(("(pthread_exit %p) walking dead\n", self));
		pthread_spinunlock(self, &pthread__allqueue_lock);
#else
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* XXXLWP race against stack being reclaimed. */
		_lwp_exit();
#endif
	} else {
		self->pt_state = PT_STATE_ZOMBIE;

		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

#ifdef PTHREAD_SA
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
		SDPRINTF(("(pthread_exit %p) walking zombie\n", self));
#else
		pthread_spinunlock(self, &self->pt_join_lock);
		_lwp_exit();
#endif
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num, retval;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

#ifdef PTHREAD_SA
	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	retval = 0;
#else	/* PTHREAD_SA */
	retval = 0;
	name = NULL;
 again:
	pthread_spinlock(self, &thread->pt_join_lock);
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread_spinunlock(self, &thread->pt_join_lock);

		/*
		 * IEEE Std 1003.1, 2004 Edition:
		 *
		 * "The pthread_join() function shall not
		 * return an error code of [EINTR]."
		 */
		if (_lwp_wait(thread->pt_lid, &num) != 0 && errno != EINTR)
			return errno;
		goto again;
	case PT_STATE_ZOMBIE:
		if (valptr != NULL)
			*valptr = thread->pt_exitval;
		if (retval == 0) {
			name = thread->pt_name;
			thread->pt_name = NULL;
		}
		thread->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &thread->pt_join_lock);
		(void)_lwp_detach(thread->pt_lid);
		break;
	default:
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}
#endif	/* PTHREAD_SA */

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	pthread__dead(self, thread);

	if (name != NULL)
		free(name);

	return retval;
}


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{
	pthread_t self;
#ifdef PTHREAD_SA
	int doreclaim = 0;
	char *name = NULL;
#endif

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

#ifdef PTHREAD_SA
	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	if (thread->pt_state == PT_STATE_ZOMBIE) {
		thread->pt_state = PT_STATE_DEAD;
		name = thread->pt_name;
		thread->pt_name = NULL;
		doreclaim = 1;
	}

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (doreclaim) {
		pthread__dead(self, thread);
		if (name != NULL)
			free(name);
	}

	return 0;
#else
	return _lwp_detach(thread->pt_lid);
#endif
}


static void
pthread__dead(pthread_t self, pthread_t thread)
{

	SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread));
#ifdef PTHREAD_SA
	pthread__assert(thread != self);
#endif
	pthread__assert(thread->pt_state == PT_STATE_DEAD);
	pthread__assert(thread->pt_name == NULL);

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self;
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	self = pthread__self();
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

#ifdef PTHREAD_SA
	if (thread->pt_state == PT_STATE_DEAD) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}
#endif

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
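
/*
 * Illustrative sketch (not part of the library): pt_name is built from a
 * printf-style format plus a single argument, so the conventional safe
 * call passes "%s" with the desired name.  The variable is hypothetical.
 */
#if 0
	pthread_t t;
	/* ...after pthread_create(&t, ...)... */
	pthread_setname_np(t, "%s", __UNCONST("worker"));
#endif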


/*
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
#ifdef PTHREAD_SA
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
		return ESRCH;

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_blockgen != thread->pt_unblockgen) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);
#else
	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);
#endif

	return 0;
}


int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
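
/*
 * Illustrative sketch (not part of the library): the standard pattern
 * for shielding a region that must not be cancelled, using the
 * functions above.  The variable name is hypothetical.
 */
#if 0
	int ocs;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
	/* ...work that must not be interrupted by cancellation... */
	pthread_setcancelstate(ocs, NULL);
#endif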


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}
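
/*
 * Illustrative sketch (not part of the library): the guarantee
 * pthread__find() provides to the functions above.  Handing a garbage
 * handle to pthread_join() yields ESRCH rather than a wild dereference.
 */
#if 0
	pthread_t bogus = (pthread_t)666;

	if (pthread_join(bogus, NULL) == ESRCH)
		;	/* expected: the handle was never a live thread */
#endif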


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
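
/*
 * Illustrative sketch (not part of the library): applications reach the
 * two functions above through the standard pthread_cleanup_push()/pop()
 * macros, which supply the on-stack storage.  Names are hypothetical.
 */
#if 0
static void
drop_lock(void *arg)
{

	pthread_mutex_unlock(arg);
}

static void
guarded(pthread_mutex_t *mp)
{

	pthread_mutex_lock(mp);
	pthread_cleanup_push(drop_lock, mp);
	/* ...code that may be cancelled or call pthread_exit()... */
	pthread_cleanup_pop(1);		/* run drop_lock() on the way out */
}
#endif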


int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);

void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	SDPRINTF(("(af)\n"));

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}

#ifndef PTHREAD_SA

/*
 * Thread park/unpark operations.  The kernel operations are
 * modelled after a brief description from "Multithreading in
 * the Solaris Operating Environment":
 *
 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
 */

#define	OOPS(msg)			\
    pthread__errorfunc(__FILE__, __LINE__, __func__, msg)

int
pthread__park(pthread_t self, pthread_spin_t *lock,
	      void *obj, struct pthread_queue_t *queue,
	      const struct timespec *abstime, int tail,
	      int cancelpt)
{
	int rv;

	SDPRINTF(("(pthread__park %p) obj %p enter\n", self, obj));

	/*
	 * Enter the object's queue.
	 */
	if (queue != NULL) {
		if (tail)
			PTQ_INSERT_TAIL(queue, self, pt_sleep);
		else
			PTQ_INSERT_HEAD(queue, self, pt_sleep);
		self->pt_sleeponq = 1;
	}
	self->pt_sleepobj = obj;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone to sleep,
	 * or an expired timeout.
	 */
	rv = 0;
	do {
		pthread_spinunlock(self, lock);
		if (_lwp_park(abstime, NULL, obj) != 0) {
			switch (rv = errno) {
			case EINTR:
				/* Check for cancellation. */
				if (cancelpt && self->pt_cancel)
					break;
				/* FALLTHROUGH */
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				SDPRINTF(("(pthread__park %p) syscall rv=%d\n",
				    self, rv));
				break;
			}
		}
		pthread_spinlock(self, lock);
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.
	 */
	if (queue != NULL && self->pt_sleeponq)
		PTQ_REMOVE(queue, self, pt_sleep);
	self->pt_sleepobj = NULL;
	self->pt_sleeponq = 0;

	SDPRINTF(("(pthread__park %p) obj %p exit\n", self, obj));

	return rv;
}

void
pthread__unpark(pthread_t self, pthread_spin_t *lock, void *obj,
		pthread_t target)
{
	int rv;

	if (target != NULL) {
		SDPRINTF(("(pthread__unpark %p) obj %p target %p\n", self, obj,
		    target));

		/*
		 * Easy: the thread has already been removed from
		 * the queue, so just awaken it.
		 */
		target->pt_sleepobj = NULL;
		target->pt_sleeponq = 0;
		pthread_spinunlock(self, lock);
		rv = _lwp_unpark(target->pt_lid, obj);

		if (rv != 0 && errno != EALREADY && errno != EINTR) {
			SDPRINTF(("(pthread__unpark %p) syscall rv=%d\n",
			    self, rv));
			OOPS("_lwp_unpark failed");
		}
	} else
		pthread_spinunlock(self, lock);
}

void
pthread__unpark_all(pthread_t self, pthread_spin_t *lock, void *obj,
		    struct pthread_queue_t *queue)
{
	lwpid_t waiters[PTHREAD__UNPARK_MAX];
	int n, rv;
	pthread_t thread, next;

	if (PTQ_EMPTY(queue)) {
		pthread_spinunlock(self, lock);
		return;
	}

	/*
	 * First, clear all sleepobj pointers, since we can release the
	 * spin lock before awakening everybody, and must synchronise with
	 * pthread__park().
	 */
	PTQ_FOREACH(thread, queue, pt_sleep) {
		thread->pt_sleepobj = NULL;
	}

	for (;;) {
		thread = PTQ_FIRST(queue);
		for (n = 0; n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			/*
			 * If the sleepobj pointer is non-NULL, it
			 * means one of two things:
			 *
			 * o The thread has awoken early, spun
			 *   through application code and is
			 *   once more asleep on this object.
			 *
			 * o This is a new thread that has blocked
			 *   on the object after we have released
			 *   the interlock in this loop.
			 *
			 * In both cases we shouldn't remove the
			 * thread from the queue.
			 *
			 * XXXLWP basic fairness issues here.
			 */
			next = PTQ_NEXT(thread, pt_sleep);
			if (thread->pt_sleepobj != NULL)
				continue;
			thread->pt_sleeponq = 0;
			waiters[n++] = thread->pt_lid;
			PTQ_REMOVE(queue, thread, pt_sleep);
			SDPRINTF(("(pthread__unpark_all %p) obj %p "
			    "unpark %p\n", self, obj, thread));
		}

		pthread_spinunlock(self, lock);
		switch (n) {
		case 0:
			return;
		case 1:
			rv = _lwp_unpark(waiters[0], obj);
			if (rv != 0 && errno != EALREADY && errno != EINTR) {
				OOPS("_lwp_unpark failed");
				SDPRINTF(("(pthread__unpark_all %p) "
				    "syscall rv=%d\n", self, rv));
			}
			return;
		default:
			rv = _lwp_unpark_all(waiters, n, obj);
			if (rv != 0 && errno != EINTR) {
				OOPS("_lwp_unpark_all failed");
				SDPRINTF(("(pthread__unpark_all %p) "
				    "syscall rv=%d\n", self, rv));
			}
			break;
		}
		pthread_spinlock(self, lock);
	}
}
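
/*
 * Illustrative sketch (not part of the library): how a simple one-shot
 * "event" could be built from pthread__park()/pthread__unpark_all().
 * pthread__park() returns with the interlock reacquired, so the waiter
 * re-tests its condition before unlocking; pthread__unpark_all() drops
 * the interlock itself.  The "event" type and all names are hypothetical;
 * ev_lock and ev_queue are assumed to have been initialized.
 */
#if 0
struct event {
	pthread_spin_t		ev_lock;
	struct pthread_queue_t	ev_queue;
	int			ev_signaled;
};

static void
event_wait(struct event *ev)
{
	pthread_t self = pthread__self();

	pthread_spinlock(self, &ev->ev_lock);
	while (!ev->ev_signaled)
		(void)pthread__park(self, &ev->ev_lock, ev, &ev->ev_queue,
		    NULL, 1, 0);	/* no timeout, queue at tail */
	pthread_spinunlock(self, &ev->ev_lock);
}

static void
event_post(struct event *ev)
{
	pthread_t self = pthread__self();

	pthread_spinlock(self, &ev->ev_lock);
	ev->ev_signaled = 1;
	pthread__unpark_all(self, &ev->ev_lock, ev, &ev->ev_queue);
}
#endif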

#undef	OOPS

#endif	/* !PTHREAD_SA */