xref: /netbsd-src/lib/libpthread/pthread.c (revision 075022b349321510ad14036d70908681ed02f117)
1 /*	$NetBSD: pthread.c,v 1.28 2003/07/21 22:24:09 nathanw Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread.c,v 1.28 2003/07/21 22:24:09 nathanw Exp $");
41 
42 #include <err.h>
43 #include <errno.h>
44 #include <lwp.h>
45 #include <signal.h>
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <string.h>
49 #include <syslog.h>
50 #include <ucontext.h>
51 #include <unistd.h>
52 
53 #include <sched.h>
54 #include "pthread.h"
55 #include "pthread_int.h"
56 
57 #ifdef PTHREAD_MAIN_DEBUG
58 #define SDPRINTF(x) DPRINTF(x)
59 #else
60 #define SDPRINTF(x)
61 #endif
62 
/* Trampoline in which every new thread begins execution (defined below). */
static void	pthread__create_tramp(void *(*start)(void *), void *arg);

/* Nonzero once pthread__start() has run, i.e. a second thread exists. */
int pthread__started;

/* Queue of all live threads (main, idle, and user threads), and its lock. */
pthread_spin_t pthread__allqueue_lock;
struct pthread_queue_t pthread__allqueue;

/* Queue of exited threads whose structures/stacks may be recycled. */
pthread_spin_t pthread__deadqueue_lock;
struct pthread_queue_t pthread__deadqueue;
/* Idle threads waiting to be reused; shares pthread__deadqueue_lock. */
struct pthread_queue_t pthread__reidlequeue;

static int nthreads;		/* live threads; process exits when it hits 0 */
static int nextthread;		/* source of unique pt_num values */
static pthread_spin_t nextthread_lock;	/* protects nextthread */
static pthread_attr_t pthread_default_attr;	/* used when attr == NULL */

/* Reporting flags for pthread__errorfunc(); toggled by $PTHREAD_DIAGASSERT. */
enum {
	DIAGASSERT_ABORT =	1<<0,
	DIAGASSERT_STDERR =	1<<1,
	DIAGASSERT_SYSLOG =	1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

/* Scheduler queues: runnable threads, and idle threads ready for use. */
pthread_spin_t pthread__runqueue_lock;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;

/* Export selected entry points to libc under its internal names. */
__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
96 
/*
 * Static library kludge.  Place a reference to a symbol in any library
 * file which does not already have a reference here.
 */
101 extern int pthread__cancel_stub_binder;
102 extern int pthread__sched_binder;
103 extern struct pthread_queue_t pthread__nanosleeping;
104 
/*
 * Never read at runtime; merely forces the linker to pull these
 * object files into a static link (see the comment above).
 */
void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	&pthread__sched_binder,
	&pthread__nanosleeping
};
115 
116 /*
117  * This needs to be started by the library loading code, before main()
118  * gets to run, for various things that use the state of the initial thread
119  * to work properly (thread-specific data is an application-visible example;
120  * spinlock counts for mutexes is an internal example).
121  */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	extern int __isthreaded;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	PTQ_INIT(&pthread__reidlequeue);
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	nthreads = 1;	/* the initial thread counts as a live thread */

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* Snapshot the current process signal mask into the main thread. */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	/*
	 * Parse $PTHREAD_DIAGASSERT: each lowercase letter sets one
	 * reporting flag, the matching uppercase letter clears it.
	 * Unknown characters are silently ignored.
	 */
	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}


	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
182 
/* fork() child-side handler, registered by pthread__start(). */
static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	/* Lets pthread__start() run again if the child creates threads. */
	pthread__started = 0;
}
198 
/*
 * One-time scheduler bring-up; called from pthread_create() just
 * before the first user thread is created.  Allocates the idle
 * threads and starts the scheduler-activations machinery.
 */
static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/* Create idle threads */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		/* Idle threads run with all signals blocked. */
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}
232 
233 
234 /* General-purpose thread data structure sanitization. */
235 void
236 pthread__initthread(pthread_t self, pthread_t t)
237 {
238 	int id;
239 
240 	pthread_spinlock(self, &nextthread_lock);
241 	id = nextthread;
242 	nextthread++;
243 	pthread_spinunlock(self, &nextthread_lock);
244 	t->pt_num = id;
245 
246 	t->pt_magic = PT_MAGIC;
247 	t->pt_type = PT_THREAD_NORMAL;
248 	t->pt_state = PT_STATE_RUNNABLE;
249 	pthread_lockinit(&t->pt_statelock);
250 	pthread_lockinit(&t->pt_flaglock);
251 	t->pt_spinlocks = 0;
252 	t->pt_next = NULL;
253 	t->pt_exitval = NULL;
254 	t->pt_flags = 0;
255 	t->pt_cancel = 0;
256 	t->pt_errno = 0;
257 	t->pt_parent = NULL;
258 	t->pt_heldlock = NULL;
259 	t->pt_switchto = NULL;
260 	t->pt_trapuc = NULL;
261 	sigemptyset(&t->pt_siglist);
262 	sigemptyset(&t->pt_sigmask);
263 	pthread_lockinit(&t->pt_siglock);
264 	PTQ_INIT(&t->pt_joiners);
265 	pthread_lockinit(&t->pt_join_lock);
266 	PTQ_INIT(&t->pt_cleanup_stack);
267 	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
268 	t->pt_name = NULL;
269 #ifdef PTHREAD__DEBUG
270 	t->blocks = 0;
271 	t->preempts = 0;
272 	t->rescheds = 0;
273 #endif
274 }
275 
276 
277 int
278 pthread_create(pthread_t *thread, const pthread_attr_t *attr,
279 	    void *(*startfunc)(void *), void *arg)
280 {
281 	pthread_t self, newthread;
282 	pthread_attr_t nattr;
283 	struct pthread_attr_private *p;
284 	char *name;
285 	int ret;
286 
287 	PTHREADD_ADD(PTHREADD_CREATE);
288 
289 	/*
290 	 * It's okay to check this without a lock because there can
291 	 * only be one thread before it becomes true.
292 	 */
293 	if (pthread__started == 0) {
294 		pthread__start();
295 		pthread__started = 1;
296 	}
297 
298 	if (attr == NULL)
299 		nattr = pthread_default_attr;
300 	else if (attr->pta_magic == PT_ATTR_MAGIC)
301 		nattr = *attr;
302 	else
303 		return EINVAL;
304 
305 	/* Fetch misc. attributes from the attr structure. */
306 	name = NULL;
307 	if ((p = nattr.pta_private) != NULL)
308 		if (p->ptap_name[0] != '\0')
309 			if ((name = strdup(p->ptap_name)) == NULL)
310 				return ENOMEM;
311 
312 	self = pthread__self();
313 
314 	pthread_spinlock(self, &pthread__deadqueue_lock);
315 	if (!PTQ_EMPTY(&pthread__deadqueue)) {
316 		newthread = PTQ_FIRST(&pthread__deadqueue);
317 		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
318 		pthread_spinunlock(self, &pthread__deadqueue_lock);
319 	} else {
320 		pthread_spinunlock(self, &pthread__deadqueue_lock);
321 		/* Set up a stack and allocate space for a pthread_st. */
322 		ret = pthread__stackalloc(&newthread);
323 		if (ret != 0)
324 			return ret;
325 	}
326 
327 	/* 2. Set up state. */
328 	pthread__initthread(self, newthread);
329 	newthread->pt_flags = nattr.pta_flags;
330 	newthread->pt_sigmask = self->pt_sigmask;
331 
332 	/* 3. Set up misc. attributes. */
333 	newthread->pt_name = name;
334 
335 	/*
336 	 * 4. Set up context.
337 	 *
338 	 * The pt_uc pointer points to a location safely below the
339 	 * stack start; this is arranged by pthread__stackalloc().
340 	 */
341 	_INITCONTEXT_U(newthread->pt_uc);
342 #ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
343 	pthread__uc_id(newthread->pt_uc) = newthread;
344 #endif
345 	newthread->pt_uc->uc_stack = newthread->pt_stack;
346 	newthread->pt_uc->uc_link = NULL;
347 	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
348 	    startfunc, arg);
349 
350 	/* 5. Add to list of all threads. */
351 	pthread_spinlock(self, &pthread__allqueue_lock);
352 	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
353 	nthreads++;
354 	pthread_spinunlock(self, &pthread__allqueue_lock);
355 
356 	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
357 	/* 6. Put on run queue. */
358 	pthread__sched(self, newthread);
359 
360 	*thread = newthread;
361 
362 	return 0;
363 }
364 
365 
/*
 * First frame of every new thread: run the user's start routine and
 * hand its return value to pthread_exit().
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{

	pthread_exit((*start)(arg));

	/* pthread_exit() does not return. */
	/*NOTREACHED*/
	pthread__abort();
}
378 
379 
380 /*
381  * Other threads will switch to the idle thread so that they
382  * can dispose of any awkward locks or recycle upcall state.
383  */
/*
 * Body of an idle thread.  Parks itself on the re-idle queue and
 * yields the processor; the scheduler recovers it from that queue.
 * Must never return.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue, self, pt_runq);
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}
417 
418 
/*
 * Terminate the calling thread with exit value 'retval'.  Runs the
 * cancellation cleanup handlers and TSD destructors, then either
 * recycles the thread immediately (detached) or becomes a zombie for
 * a joiner to collect.  Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt, flags;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting (status %p, flags %x, cancel %d).\n", self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	flags = self->pt_flags;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	if (flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us, so free the name here. */
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		/* Switch away for good, handing the held lock to the scheduler. */
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &self->pt_join_lock);
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
496 
497 
/*
 * Wait for 'thread' to terminate and collect its exit value into
 * *valptr (if non-NULL).  Implements pthread_join(3); returns 0 or an
 * errno value.  The winning joiner moves the dead thread to the dead
 * queue for recycling and frees its name.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	/* Remember pt_num so we can detect recycling while we slept. */
	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		/* pthread_join() is a cancellation point. */
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		/* Record where we're sleeping so pthread_cancel() can find us. */
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	if (name != NULL)
		free(name);

	return 0;
}
589 
590 
591 int
592 pthread_equal(pthread_t t1, pthread_t t2)
593 {
594 
595 	/* Nothing special here. */
596 	return (t1 == t2);
597 }
598 
599 
/*
 * Mark 'thread' detached so its resources are reclaimed at exit
 * without a join.  Implements pthread_detach(3); returns 0 or an
 * errno value.  Any threads already sleeping in pthread_join() are
 * woken and will return ESRCH.
 */
int
pthread_detach(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	/* Lock order: flaglock before join_lock (matches pthread_join()). */
	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}
632 
633 
634 int
635 pthread_getname_np(pthread_t thread, char *name, size_t len)
636 {
637 	pthread_t self;
638 
639 	self = pthread__self();
640 
641 	if (pthread__find(self, thread) != 0)
642 		return ESRCH;
643 
644 	if (thread->pt_magic != PT_MAGIC)
645 		return EINVAL;
646 
647 	pthread_spinlock(self, &thread->pt_join_lock);
648 	if (thread->pt_name == NULL)
649 		name[0] = '\0';
650 	else
651 		strlcpy(name, thread->pt_name, len);
652 	pthread_spinunlock(self, &thread->pt_join_lock);
653 
654 	return 0;
655 }
656 
657 
658 int
659 pthread_setname_np(pthread_t thread, const char *name, void *arg)
660 {
661 	pthread_t self = pthread_self();
662 	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
663 	int namelen;
664 
665 	if (pthread__find(self, thread) != 0)
666 		return ESRCH;
667 
668 	if (thread->pt_magic != PT_MAGIC)
669 		return EINVAL;
670 
671 	namelen = snprintf(newname, sizeof(newname), name, arg);
672 	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
673 		return EINVAL;
674 
675 	cp = strdup(newname);
676 	if (cp == NULL)
677 		return ENOMEM;
678 
679 	pthread_spinlock(self, &thread->pt_join_lock);
680 
681 	if (thread->pt_state == PT_STATE_DEAD) {
682 		pthread_spinunlock(self, &thread->pt_join_lock);
683 		free(cp);
684 		return EINVAL;
685 	}
686 
687 	oldname = thread->pt_name;
688 	thread->pt_name = cp;
689 
690 	pthread_spinunlock(self, &thread->pt_join_lock);
691 
692 	if (oldname != NULL)
693 		free(oldname);
694 
695 	return 0;
696 }
697 
698 
699 
/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
704 pthread_t
705 pthread_self(void)
706 {
707 
708 	return pthread__self();
709 }
710 
711 
/*
 * Request cancellation of 'thread'.  Implements pthread_cancel(3);
 * returns 0 or ESRCH.  If cancellation is currently disabled in the
 * target, only PT_FLAG_CS_PENDING is recorded for later; otherwise
 * the target is marked cancelled and, if blocked, woken so it can
 * notice.
 */
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	/* Cheap validity filter: only these states are cancellable. */
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}
766 
767 
/*
 * Enable or disable cancellation for the calling thread, returning
 * the previous state via *oldstate (if non-NULL).  Implements
 * pthread_setcancelstate(3); returns 0 or EINVAL.  May not return at
 * all: re-enabling with a pending async cancel exits the thread.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		/* Park any already-delivered cancel as merely pending. */
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
812 
813 
/*
 * Select deferred or asynchronous cancellation for the calling
 * thread, returning the previous type via *oldtype (if non-NULL).
 * Implements pthread_setcanceltype(3); returns 0 or EINVAL.  Going
 * asynchronous with a cancel already pending exits immediately.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		/* Async + pending cancel means we go right now. */
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
846 
847 
848 void
849 pthread_testcancel()
850 {
851 	pthread_t self;
852 
853 	self = pthread__self();
854 	if (self->pt_cancel)
855 		pthread_exit(PTHREAD_CANCELED);
856 }
857 
858 
859 /*
860  * POSIX requires that certain functions return an error rather than
861  * invoking undefined behavior even when handed completely bogus
862  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
863  * utility routine searches the list of threads for the pthread_t
864  * value without dereferencing it.
865  */
/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 *
 * Returns 0 if 'id' is a known thread, ESRCH otherwise.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	/* target is NULL iff the loop ran off the end without a match. */
	if (target == NULL)
		return ESRCH;

	return 0;
}
882 
883 
884 void
885 pthread__testcancel(pthread_t self)
886 {
887 
888 	if (self->pt_cancel)
889 		pthread_exit(PTHREAD_CANCELED);
890 }
891 
892 
893 void
894 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
895 {
896 	pthread_t self;
897 	struct pt_clean_t *entry;
898 
899 	self = pthread__self();
900 	entry = store;
901 	entry->ptc_cleanup = cleanup;
902 	entry->ptc_arg = arg;
903 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
904 }
905 
906 
907 void
908 pthread__cleanup_pop(int ex, void *store)
909 {
910 	pthread_t self;
911 	struct pt_clean_t *entry;
912 
913 	self = pthread__self();
914 	entry = store;
915 
916 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
917 	if (ex)
918 		(*entry->ptc_cleanup)(entry->ptc_arg);
919 }
920 
921 
922 int *
923 pthread__errno(void)
924 {
925 	pthread_t self;
926 
927 	self = pthread__self();
928 
929 	return &(self->pt_errno);
930 }
931 
932 ssize_t	_sys_write(int, const void *, size_t);
933 
/*
 * Backend for the library's internal assertions: format a diagnostic,
 * write it to stderr, raise SIGABRT, and exit.  Never returns.
 */
void
pthread__assertfunc(char *file, int line, char *function, char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/*
	 * snprintf returns the length the message *would* have had (or
	 * a negative value on error); clamp so the write can never read
	 * past the end of buf.
	 */
	if (len < 0)
		len = 0;
	else if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}
956 
957 
958 void
959 pthread__errorfunc(char *file, int line, char *function, char *msg)
960 {
961 	char buf[1024];
962 	size_t len;
963 
964 	if (pthread__diagassert == 0)
965 		return;
966 
967 	/*
968 	 * snprintf should not acquire any locks, or we could
969 	 * end up deadlocked if the assert caller held locks.
970 	 */
971 	len = snprintf(buf, 1024,
972 	    "%s: Error detected by libpthread: %s.\n"
973 	    "Detected by file \"%s\", line %d%s%s%s.\n"
974 	    "See pthread(3) for information.\n",
975 	    getprogname(), msg, file, line,
976 	    function ? ", function \"" : "",
977 	    function ? function : "",
978 	    function ? "\"" : "");
979 
980 	if (pthread__diagassert & DIAGASSERT_STDERR)
981 		_sys_write(STDERR_FILENO, buf, len);
982 
983 	if (pthread__diagassert & DIAGASSERT_SYSLOG)
984 		syslog(LOG_DEBUG | LOG_USER, "%s", buf);
985 
986 	if (pthread__diagassert & DIAGASSERT_ABORT) {
987 		(void)kill(getpid(), SIGABRT);
988 		_exit(1);
989 	}
990 }
991