/*	$NetBSD: pthread.c,v 1.68 2007/03/24 18:51:59 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.68 2007/03/24 18:51:59 ad Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/sysctl.h>

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* Maximum number of LWPs to unpark in one operation. */
#define	PTHREAD__UNPARK_MAX	128

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		1024

static void	pthread__create_tramp(void *(*start)(void *), void *arg);
static void	pthread__dead(pthread_t, pthread_t);

int pthread__started;

pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__allqueue;

pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t *pthread__reidlequeue;

static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED;
static pthread_attr_t pthread_default_attr;

enum {
	DIAGASSERT_ABORT =	1<<0,
	DIAGASSERT_STDERR =	1<<1,
	DIAGASSERT_SYSLOG =	1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

int pthread__concurrency, pthread__maxconcurrency, pthread__nspins;
int pthread__unpark_max = PTHREAD__UNPARK_MAX;

int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)

/*
 * Static library kludge.  Place a reference to a symbol from any
 * library file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spin lock counts for mutexes are an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2], ncpu;
	size_t len;
	extern int __isthreaded;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1)
		err(1, "sysctl(hw.ncpu)");

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init(ncpu);

	/*
	 * Decide how much to spin based on the number of CPUs, and
	 * find the maximum number of LWPs that can be unparked at once.
	 */
	if ((pthread__concurrency = ncpu) > 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;
	i = (int)_lwp_unpark_all(NULL, 0, NULL);
	if (i == -1)
		err(1, "_lwp_unpark_all");
	if (i < pthread__unpark_max)
		pthread__unpark_max = i;

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	nthreads = 1;
	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);

	first->pt_state = PT_STATE_RUNNING;
	first->pt_lid = _lwp_self();
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init(ncpu);
#endif

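	/*
	 * Parse the PTHREAD_DIAGASSERT environment variable: each
	 * letter toggles one error-reporting flag.  'a'/'A' enable or
	 * disable abort (SIGABRT), 'e'/'E' output to stderr, and
	 * 'l'/'L' output to syslog.  For example (illustrative shell
	 * usage, not part of this file):
	 *
	 *	PTHREAD_DIAGASSERT=Ael ./prog
	 *
	 * reports errors to stderr and syslog without aborting.
	 */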
	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}


	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}

static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{
	pthread_t self;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread_atfork(NULL, NULL, pthread__child_callback);
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}


/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_state = PT_STATE_RUNNING;

	pthread_lockinit(&t->pt_statelock);

	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	memset(&t->pt_specific, 0, sizeof(t->pt_specific));
	t->pt_name = NULL;
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * volatile name;
	int ret, flag;

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	pthread_spinlock(self, &pthread__deadqueue_lock);
	newthread = PTQ_FIRST(&pthread__deadqueue);
	if (newthread != NULL) {
		if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0 &&
		    (_lwp_kill(newthread->pt_lid, 0) == 0 || errno != ESRCH))
			newthread = NULL;
		else
			PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
	}
	pthread_spinunlock(self, &pthread__deadqueue_lock);
	if (newthread == NULL) {
		/* 1. Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	/* 5a. Create the new LWP. */
	newthread->pt_sleeponq = 0;
	flag = 0;
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0)
		flag |= LWP_SUSPENDED;
	if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0)
		flag |= LWP_DETACHED;
	ret = _lwp_create(newthread->pt_uc, (u_long)flag, &newthread->pt_lid);
	if (ret != 0) {
		SDPRINTF(("(pthread_create %p) _lwp_create: %s\n",
		    self, strerror(errno)));
		free(name);
		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, newthread, pt_allq);
		nthreads--;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		return ret;
	}

	SDPRINTF(("(pthread_create %p) new thread %p (name %p, lid %d).\n",
		  self, newthread, newthread->pt_name,
		  (int)newthread->pt_lid));

	*thread = newthread;

	return 0;
}
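
/*
 * Illustrative caller's view of the function above (a sketch, not
 * part of the library; the names "worker" and "t" are hypothetical).
 * Creating a detached thread:
 *
 *	#include <pthread.h>
 *
 *	void *worker(void *arg);
 *
 *	pthread_attr_t a;
 *	pthread_t t;
 *
 *	pthread_attr_init(&a);
 *	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
 *	if (pthread_create(&t, &a, worker, NULL) != 0)
 *		err(1, "pthread_create");
 *	pthread_attr_destroy(&a);
 */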


static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p.\n",
		     self, thread));
	return _lwp_suspend(thread->pt_lid);
}

int
pthread_resume_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p.\n",
		     self, thread));
	return _lwp_continue(thread->pt_lid);
}

void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) status %p, flags %x, cancel %d\n",
		  self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	/*
	 * it's safe to check PT_FLAG_DETACHED without pt_flaglock
	 * because it's only set by pthread_detach with pt_join_lock held.
	 */
	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &self->pt_join_lock);
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		_lwp_exit();
	} else {
		self->pt_state = PT_STATE_ZOMBIE;

		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		pthread_spinunlock(self, &self->pt_join_lock);
		_lwp_exit();
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num, retval;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	retval = 0;
	name = NULL;
 again:
	pthread_spinlock(self, &thread->pt_join_lock);
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread_spinunlock(self, &thread->pt_join_lock);

		/*
		 * IEEE Std 1003.1, 2004 Edition:
		 *
		 * "The pthread_join() function shall not
		 * return an error code of [EINTR]."
		 */
		if (_lwp_wait(thread->pt_lid, &num) != 0 && errno != EINTR)
			return errno;
		goto again;
	case PT_STATE_ZOMBIE:
		if (valptr != NULL)
			*valptr = thread->pt_exitval;
		if (retval == 0) {
			name = thread->pt_name;
			thread->pt_name = NULL;
		}
		thread->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &thread->pt_join_lock);
		(void)_lwp_detach(thread->pt_lid);
		break;
	default:
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	pthread__dead(self, thread);

	if (name != NULL)
		free(name);

	return retval;
}


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;
	pthread_spinlock(self, &thread->pt_join_lock);
	thread->pt_flags |= PT_FLAG_DETACHED;
	pthread_spinunlock(self, &thread->pt_join_lock);

	return _lwp_detach(thread->pt_lid);
}


static void
pthread__dead(pthread_t self, pthread_t thread)
{

	SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread));
	pthread__assert(thread->pt_state == PT_STATE_DEAD);
	pthread__assert(thread->pt_name == NULL);

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


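/*
 * Note the nonstandard calling convention below: "name" is used as a
 * printf(3)-style format string, with "arg" as its single argument,
 * and the formatted result must fit in PTHREAD_MAX_NAMELEN_NP bytes.
 * An illustrative call (hypothetical variables, not part of this
 * file):
 *
 *	pthread_setname_np(t, "worker/%p", arg);
 */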
int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self;
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	self = pthread__self();
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);
	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}



/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
#ifdef ERRORCHECK
	if (pthread__find(self, thread) != 0)
		return ESRCH;
#endif
	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}


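/*
 * Illustrative caller's view (a sketch, not part of the library):
 * shield a region that must not be torn down mid-way, then restore
 * the previous cancellation state.
 *
 *	int ocs;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
 *	... update shared state ...
 *	pthread_setcancelstate(ocs, NULL);
 *
 * A cancel posted while disabled is recorded as pending and acted on
 * when cancellation is re-enabled, as the code below shows.
 */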
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


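/*
 * pthread__cleanup_push() and pthread__cleanup_pop() below back the
 * standard pthread_cleanup_push()/pthread_cleanup_pop() macros, which
 * supply the "store" argument from block-scope stack space; that is
 * why each push must be paired with a pop in the same lexical scope.
 * Illustrative use of the macros (hypothetical names, not part of
 * this file):
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_fn, &m);
 *	... code that may hit a cancellation point ...
 *	pthread_cleanup_pop(1);		(nonzero: run unlock_fn(&m) now)
 */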
void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}


int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);

void
pthread__assertfunc(const char *file, int line, const char *function,
		    const char *expr)
{
	char buf[1024];
	int len;

	SDPRINTF(("(af)\n"));

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/* snprintf returns the would-be length; clamp before writing. */
	if (len > (int)sizeof(buf) - 1)
		len = (int)sizeof(buf) - 1;
	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(const char *file, int line, const char *function,
		   const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/* Clamp the would-be length reported by snprintf. */
	if (len >= sizeof(buf))
		len = sizeof(buf) - 1;

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}

/*
 * Thread park/unpark operations.  The kernel operations are
 * modelled after a brief description from "Multithreading in
 * the Solaris Operating Environment":
 *
 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
 */

#define	OOPS(msg)			\
    pthread__errorfunc(__FILE__, __LINE__, __func__, msg)
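
/*
 * Sketch of the caller protocol for pthread__park(), inferred from
 * the code below (illustrative; "obj", "interlock" and "waitqueue"
 * are hypothetical):
 *
 *	pthread_spinlock(self, &interlock);
 *	PTQ_INSERT_TAIL(&waitqueue, self, pt_sleep);
 *	self->pt_sleeponq = 1;
 *	self->pt_sleepobj = obj;
 *	(void)pthread__park(self, &interlock, &waitqueue, NULL, 0, obj);
 *	pthread_spinunlock(self, &interlock);
 *
 * pthread__park() returns with the interlock reacquired.  An unparker
 * clears pt_sleepobj and pt_sleeponq under the same interlock, which
 * is how the loop below tells a real wakeup from a spurious one.
 */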

int
pthread__park(pthread_t self, pthread_spin_t *lock,
	      struct pthread_queue_t *queue,
	      const struct timespec *abstime, int cancelpt,
	      const void *hint)
{
	int rv;

	SDPRINTF(("(pthread__park %p) queue %p enter\n", self, queue));

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 */
	rv = 0;
	do {
		pthread_spinunlock(self, lock);
		if (_lwp_park(abstime, NULL, hint) != 0) {
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				SDPRINTF(("(pthread__park %p) syscall rv=%d\n",
				    self, rv));
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel) {
			/*
			 * Ensure visibility of the correct value.
			 * _lwp_park/_lwp_wakeup also provide a
			 * barrier.
			 */
			pthread_spinlock(self, &self->pt_flaglock);
			if (self->pt_cancel)
				rv = EINTR;
			pthread_spinunlock(self, &self->pt_flaglock);
		}
		pthread_spinlock(self, lock);
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.
	 */
	if (self->pt_sleeponq) {
		PTQ_REMOVE(queue, self, pt_sleep);
		self->pt_sleepobj = NULL;
		self->pt_sleeponq = 0;
	}

	SDPRINTF(("(pthread__park %p) queue %p exit\n", self, queue));

	return rv;
}

void
pthread__unpark(pthread_t self, pthread_spin_t *lock,
		struct pthread_queue_t *queue, pthread_t target)
{
	int rv;

	if (target == NULL) {
		pthread_spinunlock(self, lock);
		return;
	}

	SDPRINTF(("(pthread__unpark %p) queue %p target %p\n",
	    self, queue, target));

	/*
	 * Easy: the thread has already been removed from
	 * the queue, so just awaken it.
	 */
	target->pt_sleepobj = NULL;
	target->pt_sleeponq = 0;
	pthread_spinunlock(self, lock);
	rv = _lwp_unpark(target->pt_lid, queue);

	if (rv != 0 && errno != EALREADY && errno != EINTR) {
		SDPRINTF(("(pthread__unpark %p) syscall rv=%d\n",
		    self, rv));
		OOPS("_lwp_unpark failed");
	}
}

void
pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
		    struct pthread_queue_t *queue)
{
	lwpid_t waiters[PTHREAD__UNPARK_MAX];
	ssize_t n, rv;
	pthread_t thread, next;

	if (PTQ_EMPTY(queue)) {
		pthread_spinunlock(self, lock);
		return;
	}

	/*
	 * First, clear all sleepobj pointers, since we can release the
	 * spin lock before awakening everybody, and must synchronise with
	 * pthread__park().
	 */
	PTQ_FOREACH(thread, queue, pt_sleep) {
		thread->pt_sleepobj = NULL;
		if (thread == PTQ_NEXT(thread, pt_sleep))
			OOPS("unpark: thread linked to self");
	}

	for (;;) {
		thread = PTQ_FIRST(queue);
		for (n = 0; n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			/*
			 * If the sleepobj pointer is non-NULL, it
			 * means one of two things:
			 *
			 * o The thread has awoken early, spun
			 *   through application code and is
			 *   once more asleep on this object.
			 *
			 * o This is a new thread that has blocked
			 *   on the object after we have released
			 *   the interlock in this loop.
			 *
			 * In both cases we shouldn't remove the
			 * thread from the queue.
			 *
			 * XXXLWP basic fairness issues here.
			 */
			next = PTQ_NEXT(thread, pt_sleep);
			if (thread->pt_sleepobj != NULL)
				continue;
			thread->pt_sleeponq = 0;
			waiters[n++] = thread->pt_lid;
			PTQ_REMOVE(queue, thread, pt_sleep);
			SDPRINTF(("(pthread__unpark_all %p) queue %p "
			    "unpark %p\n", self, queue, thread));
		}

		pthread_spinunlock(self, lock);
		switch (n) {
		case 0:
			return;
		case 1:
			rv = (ssize_t)_lwp_unpark(waiters[0], queue);
			if (rv != 0 && errno != EALREADY && errno != EINTR) {
				OOPS("_lwp_unpark failed");
				SDPRINTF(("(pthread__unpark_all %p) "
				    "syscall rv=%d\n", self, rv));
			}
			return;
		default:
			rv = _lwp_unpark_all(waiters, n, queue);
			if (rv != 0 && errno != EINTR) {
				OOPS("_lwp_unpark_all failed");
				SDPRINTF(("(pthread__unpark_all %p) "
				    "syscall rv=%d\n", self, rv));
			}
			break;
		}
		pthread_spinlock(self, lock);
	}
}

#undef	OOPS