/*	$NetBSD: pthread.c,v 1.50 2006/12/23 05:18:56 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2003, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.50 2006/12/23 05:18:56 ad Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#ifdef PTHREAD_MLOCK_KLUDGE
#include <sys/mman.h>
#endif

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

static void	pthread__create_tramp(void *(*start)(void *), void *arg);
static void	pthread__dead(pthread_t, pthread_t);

int pthread__started;

pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__allqueue;

pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t *pthread__reidlequeue;

static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED;
static pthread_attr_t pthread_default_attr;

enum {
	DIAGASSERT_ABORT =	1<<0,
	DIAGASSERT_STDERR =	1<<1,
	DIAGASSERT_SYSLOG =	1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

#ifdef PTHREAD_SA
pthread_spin_t pthread__runqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;
struct pthread_queue_t pthread__suspqueue;

int pthread__concurrency, pthread__maxconcurrency;

int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);
#endif

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
/*
 * Static library kludge.  Place a reference here to a symbol in each
 * library file that is not already referenced.
 */
extern int pthread__cancel_stub_binder;
#ifdef PTHREAD_SA
extern int pthread__sched_binder;
extern struct pthread_queue_t pthread__nanosleeping;
#endif

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
#ifdef PTHREAD_SA
	&pthread__sched_binder,
	&pthread__nanosleeping
#endif
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes are an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	int mib[2], ncpu;
	size_t len;
	extern int __isthreaded;
#ifdef PTHREAD_MLOCK_KLUDGE
	int ret;
#endif
#ifdef PTHREAD_SA
	int i;
#endif

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	sysctl(mib, 2, &ncpu, &len, NULL, 0);

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init(ncpu);

#ifdef PTHREAD_SA
	/* Find out requested/possible concurrency */
	p = getenv("PTHREAD_CONCURRENCY");
	pthread__maxconcurrency = p ? atoi(p) : 1;

	if (pthread__maxconcurrency < 1)
		pthread__maxconcurrency = 1;
	if (pthread__maxconcurrency > ncpu)
		pthread__maxconcurrency = ncpu;

	/* Allocate data structures */
	pthread__reidlequeue = (struct pthread_queue_t *)malloc
		(pthread__maxconcurrency * sizeof(struct pthread_queue_t));
	if (pthread__reidlequeue == NULL)
		err(1, "Couldn't allocate memory for pthread__reidlequeue");
#endif

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
#ifdef PTHREAD_MLOCK_KLUDGE
	ret = mlock(&pthread__deadqueue, sizeof(pthread__deadqueue));
	pthread__assert(ret == 0);
#endif
#ifdef PTHREAD_SA
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	for (i = 0; i < pthread__maxconcurrency; i++)
		PTQ_INIT(&pthread__reidlequeue[i]);
	nthreads = 1;
#endif
	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);

	first->pt_state = PT_STATE_RUNNING;
#ifdef PTHREAD_SA
	_sys___sigprocmask14(0, NULL, &first->pt_sigmask);
#else
	first->pt_lid = _lwp_self();
#endif
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
#ifdef PTHREAD_SA
	pthread__signal_init();
#endif
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init(ncpu);
#endif

	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}


	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
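
/*
 * Illustrative note (not part of the upstream source): the loop above
 * treats the PTHREAD_DIAGASSERT environment variable as a string of
 * single-character toggles, where a lower-case letter sets a flag and
 * its upper-case counterpart clears it.  For a hypothetical setting of
 * PTHREAD_DIAGASSERT="Al", the flag word evolves like so:
 *
 *	pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;
 *	pthread__diagassert &= ~DIAGASSERT_ABORT;	-- 'A'
 *	pthread__diagassert |= DIAGASSERT_SYSLOG;	-- 'l'
 *
 * leaving DIAGASSERT_STDERR | DIAGASSERT_SYSLOG set: diagnostics go to
 * stderr and syslog, and the process is not aborted.
 */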

static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{
	pthread_t self;
#ifdef PTHREAD_SA
	pthread_t idle;
	int i, ret;
#endif

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
#ifdef PTHREAD_SA
	pthread__alarm_init();

	pthread__signal_start();
#endif

	pthread_atfork(NULL, NULL, pthread__child_callback);

#ifdef PTHREAD_SA
	/*
	 * Create idle threads
	 * XXX need to create more idle threads if concurrency > 3
	 */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
#endif

	SDPRINTF(("(pthread__start %p) Started.\n", self));
}


/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;

#ifdef PTHREAD_SA
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	t->pt_heldlock = NULL;
	t->pt_next = NULL;
	t->pt_parent = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
#else
	t->pt_state = PT_STATE_RUNNING;
#endif

	pthread_lockinit(&t->pt_statelock);

	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;

#if defined(PTHREAD__DEBUG) && defined(PTHREAD_SA)
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;
#ifndef PTHREAD_SA
	int flag;
#endif

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
#ifdef PTHREAD_SA
	newthread->pt_sigmask = self->pt_sigmask;
#endif

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

#ifndef PTHREAD_SA
	/* 4a. Create the new LWP. */
	flag = (newthread->pt_flags & PT_FLAG_SUSPENDED) ? LWP_SUSPENDED : 0;
	ret = _lwp_create(newthread->pt_uc, (u_long)flag, &newthread->pt_lid);
	if (ret != 0) {
		SDPRINTF(("(pthread_create %p) _lwp_create: %s\n",
		    self, strerror(errno)));
		/* XXXLWP what else? */
		free(name);
		return ret;
	}
#endif

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

#ifdef PTHREAD_SA
	SDPRINTF(("(pthread_create %p) new thread %p (name pointer %p).\n",
		  self, newthread, newthread->pt_name));
	/* 6. Put on appropriate queue. */
	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
		pthread_spinlock(self, &newthread->pt_statelock);
		pthread__suspend(self, newthread);
		pthread_spinunlock(self, &newthread->pt_statelock);
	} else
		pthread__sched(self, newthread);
#else
	SDPRINTF(("(pthread_create %p) new thread %p (name %p, lid %d).\n",
		  self, newthread, newthread->pt_name,
		  (int)newthread->pt_lid));
#endif

	*thread = newthread;

	return 0;
}
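
/*
 * Usage sketch (hypothetical application code, not part of the
 * library).  pthread functions return an error number directly
 * instead of setting errno:
 *
 *	#include <pthread.h>
 *	#include <string.h>
 *	#include <err.h>
 *
 *	static void *
 *	worker(void *cookie)
 *	{
 *		return cookie;		-- becomes the join value
 *	}
 *
 *	pthread_t t;
 *	void *val;
 *	int error;
 *
 *	error = pthread_create(&t, NULL, worker, (void *)"hello");
 *	if (error != 0)
 *		errx(1, "pthread_create: %s", strerror(error));
 *	error = pthread_join(t, &val);
 */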


static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
#ifdef PTHREAD_SA
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	if (thread->pt_blockgen != thread->pt_unblockgen) {
		/* XXX flaglock? */
		thread->pt_flags |= PT_FLAG_SUSPENDED;
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	}
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread__abort();	/* XXX */
		break;
	case PT_STATE_SUSPENDED:
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	case PT_STATE_RUNNABLE:
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		break;
	case PT_STATE_BLOCKED_QUEUE:
		pthread_spinlock(self, thread->pt_sleeplock);
		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
		pthread_spinunlock(self, thread->pt_sleeplock);
		break;
	case PT_STATE_ZOMBIE:
		goto out;
	default:
		break;			/* XXX */
	}
	pthread__suspend(self, thread);

out:
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
#else
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p.\n",
		     self, thread));
	return _lwp_suspend(thread->pt_lid);
#endif
}

int
pthread_resume_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
#ifdef PTHREAD_SA
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	/* XXX flaglock? */
	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
	if (thread->pt_state == PT_STATE_SUSPENDED) {
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		pthread__sched(self, thread);
	}
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
#else
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p.\n",
		     self, thread));
	return _lwp_continue(thread->pt_lid);
#endif
}
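
/*
 * Usage sketch (hypothetical, non-portable application code): a thread
 * can be held and later released by another thread.  The target must
 * not hold locks the suspender might need, or the process can deadlock:
 *
 *	int error;
 *
 *	error = pthread_suspend_np(worker);	-- freeze "worker"
 *	if (error == 0) {
 *		... inspect state the worker would otherwise mutate ...
 *		error = pthread_resume_np(worker);
 *	}
 *
 * where "worker" is a pthread_t previously returned by pthread_create().
 */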

#ifdef PTHREAD_SA
/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq);
	pthread__concurrency--;
	SDPRINTF(("(yield %p concurrency) now %d\n", self,
		     pthread__concurrency));
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}
#endif


void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
#ifdef PTHREAD_SA
	int nt;
#endif

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) status %p, flags %x, cancel %d\n",
		  self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	/*
	 * it's safe to check PT_FLAG_DETACHED without pt_flaglock
	 * because it's only set by pthread_detach with pt_join_lock held.
	 */
	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &self->pt_join_lock);
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
#ifdef PTHREAD_SA
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		pthread__block(self, &pthread__deadqueue_lock);
		SDPRINTF(("(pthread_exit %p) walking dead\n", self));
#else
		pthread_spinunlock(self, &pthread__allqueue_lock);
		/* XXXLWP recycle stack */
		_lwp_exit();
#endif
	} else {
		self->pt_state = PT_STATE_ZOMBIE;

#ifdef PTHREAD_SA
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
		SDPRINTF(("(pthread_exit %p) walking zombie\n", self));
#else
		pthread_spinunlock(self, &self->pt_join_lock);
		/* XXXLWP recycle stack */
		_lwp_exit();
#endif
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
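
/*
 * Illustrative sketch (hypothetical application code): cleanup handlers
 * pushed with pthread_cleanup_push() run in LIFO order on the way out,
 * before the thread-specific-data destructors, exactly as the loop at
 * the top of pthread_exit() above unwinds pt_cleanup_stack:
 *
 *	static void
 *	unlock_it(void *m)
 *	{
 *		pthread_mutex_unlock(m);
 *	}
 *
 *	pthread_mutex_lock(&mtx);
 *	pthread_cleanup_push(unlock_it, &mtx);
 *	...			-- pthread_exit() here still unlocks mtx
 *	pthread_cleanup_pop(1);	-- pop and run the handler normally
 *
 * where "mtx" is a hypothetical pthread_mutex_t owned by the caller.
 */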


int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num, retval;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

#ifdef PTHREAD_SA
	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	retval = 0;
#else	/* PTHREAD_SA */
	retval = 0;
	name = NULL;
 again:
	pthread_spinlock(self, &thread->pt_join_lock);
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread_spinunlock(self, &thread->pt_join_lock);
		retval = _lwp_wait(thread->pt_lid, &num);
		if (retval != 0)
			return retval;
		goto again;
	case PT_STATE_ZOMBIE:
		if (valptr != NULL)
			*valptr = thread->pt_exitval;	/* XXXLWP search for LWP */
		if (retval == 0) {
			name = thread->pt_name;
			thread->pt_name = NULL;
		}
		thread->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &thread->pt_join_lock);
		break;
	default:
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}
#endif	/* PTHREAD_SA */

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	pthread__dead(self, thread);

	if (name != NULL)
		free(name);

	return retval;
}


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{
	pthread_t self;
#ifdef PTHREAD_SA
	int doreclaim = 0;
	char *name = NULL;
#endif

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

#ifdef PTHREAD_SA
	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	if (thread->pt_state == PT_STATE_ZOMBIE) {
		thread->pt_state = PT_STATE_DEAD;
		name = thread->pt_name;
		thread->pt_name = NULL;
		doreclaim = 1;
	}

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (doreclaim) {
		pthread__dead(self, thread);
		if (name != NULL)
			free(name);
	}

	return 0;
#else
	return _lwp_detach(thread->pt_lid);
#endif
}
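
/*
 * Usage sketch (hypothetical application code): a thread whose result
 * will never be collected should be detached, either after the fact as
 * above or at creation time via the attribute, so its resources can be
 * recycled without a matching pthread_join():
 *
 *	pthread_attr_t a;
 *
 *	pthread_attr_init(&a);
 *	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
 *	error = pthread_create(&t, &a, worker, NULL);
 *	pthread_attr_destroy(&a);
 *
 * Attempting to detach an already-detached thread returns EINVAL, as
 * the PTHREAD_SA path above checks explicitly.
 */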


static void
pthread__dead(pthread_t self, pthread_t thread)
{

	SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread));
	pthread__assert(thread != self);
	pthread__assert(thread->pt_state == PT_STATE_DEAD);
	pthread__assert(thread->pt_name == NULL);

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self;
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	self = pthread__self();
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

#ifdef PTHREAD_SA
	if (thread->pt_state == PT_STATE_DEAD) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}
#endif

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
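
/*
 * Usage sketch (hypothetical application code): as the snprintf() call
 * above shows, NetBSD's pthread_setname_np() treats "name" as a
 * printf-like format with "arg" as its single argument:
 *
 *	char buf[PTHREAD_MAX_NAMELEN_NP];
 *	int error;
 *
 *	error = pthread_setname_np(t, "worker@%p", (void *)t);
 *	if (error == 0)
 *		(void)pthread_getname_np(t, buf, sizeof(buf));
 *
 * where "t" is the caller's thread handle.
 */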



/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
#ifdef PTHREAD_SA
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
		return ESRCH;

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_blockgen != thread->pt_unblockgen) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptibly in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);
#else
	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);
#endif

	return 0;
}
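
/*
 * Usage sketch (hypothetical application code): with the default
 * deferred cancel type, the target only acts on the request at a
 * cancellation point, and joining it yields PTHREAD_CANCELED:
 *
 *	void *val;
 *
 *	pthread_cancel(t);
 *	pthread_join(t, &val);
 *	if (val == PTHREAD_CANCELED)
 *		... the target was cancelled, not a normal return ...
 */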


int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
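
/*
 * Usage sketch (hypothetical application code): the usual idiom is to
 * disable cancellation around a must-complete critical region and then
 * restore whatever state was in effect before:
 *
 *	int ocs;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
 *	... work during which cancellation points are not honored ...
 *	pthread_setcancelstate(ocs, NULL);
 *
 * A request arriving meanwhile is remembered via PT_FLAG_CS_PENDING
 * and re-armed when cancellation is re-enabled, as the code above shows.
 */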


int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
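
/*
 * Illustrative note (not part of the upstream source): the public
 * pthread_cleanup_push()/pthread_cleanup_pop() macros expand to calls
 * of the two functions above, passing caller-provided stack space for
 * the pt_clean_t entry, conceptually along these lines:
 *
 *	#define pthread_cleanup_push(fn, arg)				\
 *		{ struct pt_clean_t __store;				\
 *		  pthread__cleanup_push((fn), (arg), (void *)&__store);
 *	#define pthread_cleanup_pop(ex)					\
 *		  pthread__cleanup_pop((ex), (void *)&__store); }
 *
 * which is why push and pop must appear as a brace-balanced pair in
 * the same lexical scope.  (Sketch only; see pthread.h for the real
 * definitions.)
 */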


int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);

void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	SDPRINTF(("(af)\n"));

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}

#ifndef PTHREAD_SA

#define	OOPS(msg)			\
    pthread__errorfunc(__FILE__, __LINE__, __FUNCTION__, msg)

int
pthread__park(pthread_t self, pthread_spin_t *lock,
	      void *obj, struct pthread_queue_t *queue,
	      const struct timespec *abstime, int tail)
{
	int rv;

	SDPRINTF(("(pthread__park %p) obj %p enter\n", self, obj));

	if (queue != NULL) {
		if (tail)
			PTQ_INSERT_TAIL(queue, self, pt_sleep);
		else
			PTQ_INSERT_HEAD(queue, self, pt_sleep);
	}
	self->pt_sleepobj = obj;

	do {
		pthread_spinunlock(self, lock);

		if ((rv = _lwp_park((const void *)abstime, NULL)) != 0)
			rv = errno;
		else
			rv = 0;

		switch (rv) {
		case 0:
		case EINTR:
		case ETIMEDOUT:
		case EALREADY:
			break;
		default:
			OOPS("_lwp_park failed");
			SDPRINTF(("(pthread__park %p) syscall rv=%d\n",
			    self, rv));
			break;
		}

		pthread_spinlock(self, lock);
	} while (self->pt_sleepobj != NULL && rv != ETIMEDOUT);

	if (rv == ETIMEDOUT && queue != NULL && self->pt_sleepobj != NULL) {
		PTQ_REMOVE(queue, self, pt_sleep);
		self->pt_sleepobj = NULL;
	}

	SDPRINTF(("(pthread__park %p) obj %p exit\n", self, obj));

	return rv;
}
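
/*
 * Illustrative sketch (not part of the upstream source): a wait/wake
 * primitive built on the park interface.  The waiter enqueues itself
 * and sleeps under "lock"; the waker dequeues it, and pthread__unpark()
 * clears pt_sleepobj, which is what ends the retry loop in
 * pthread__park() above:
 *
 *	-- waiter, called with "lock" held; park returns with it held:
 *	(void)pthread__park(self, &lock, &obj, &sleepers, NULL, 1);
 *	pthread_spinunlock(self, &lock);
 *
 *	-- waker, called with "lock" held; pthread__unpark drops it:
 *	target = PTQ_FIRST(&sleepers);
 *	if (target != NULL)
 *		PTQ_REMOVE(&sleepers, target, pt_sleep);
 *	pthread__unpark(self, &lock, &obj, target);
 *
 * Here "lock", "obj" and "sleepers" are hypothetical variables owned
 * by the synchronization object being implemented.
 */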

void
pthread__unpark(pthread_t self, pthread_spin_t *lock, void *obj,
		pthread_t target)
{
	int rv;

	if (target != NULL) {
		SDPRINTF(("(pthread__unpark %p) obj %p target %p\n", self, obj,
		    target));

		target->pt_sleepobj = NULL;
		pthread_spinunlock(self, lock);
		rv = _lwp_unpark(target->pt_lid);

		if (rv != 0 && errno != EALREADY && errno != EINTR) {
			SDPRINTF(("(pthread__unpark %p) syscall rv=%d\n",
			    self, rv));
			OOPS("_lwp_unpark failed");
		}
	} else
		pthread_spinunlock(self, lock);
}

void
pthread__unpark_all(pthread_t self, pthread_spin_t *lock, void *obj,
		    struct pthread_queue_t *queue)
{
	pthread_t thread;
	lwpid_t waiters[32];	/* XXXLWP get value from kernel */
	int nwaiters, rv;

	for (;;) {
		nwaiters = 0;
		while (nwaiters < sizeof(waiters) / sizeof(waiters[0]) &&
		    (thread = PTQ_FIRST(queue)) != NULL) {
			PTQ_REMOVE(queue, thread, pt_sleep);
			thread->pt_sleepobj = NULL;
			waiters[nwaiters++] = thread->pt_lid;
			SDPRINTF(("(pthread__unpark_all %p) obj %p "
			    "unpark %p\n", self, obj, thread));
		}
		pthread_spinunlock(self, lock);
		switch (nwaiters) {
		case 0:
			return;
		case 1:
			rv = _lwp_unpark(waiters[0]);
			if (rv != 0 && errno != EALREADY && errno != EINTR) {
				OOPS("_lwp_unpark failed");
				SDPRINTF(("(pthread__unpark_all %p) "
				    "syscall rv=%d\n", self, rv));
			}
			return;
		default:
			rv = _lwp_unpark_all(waiters, nwaiters);
			if (rv != 0 && errno != EINTR) {
				OOPS("_lwp_unpark_all failed");
				SDPRINTF(("(pthread__unpark_all %p) "
				    "syscall rv=%d\n", self, rv));
			}
			break;
		}
		if (PTQ_EMPTY(queue))
			return;
		pthread_spinlock(self, lock);
	}
}

#undef	OOPS

#endif	/* !PTHREAD_SA */