xref: /netbsd-src/lib/libpthread/pthread.c (revision 73704c4ce4ee2a60eb617e693ce7e9f03902613e)
1 /*	$NetBSD: pthread.c,v 1.29 2003/08/13 18:52:01 nathanw Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread.c,v 1.29 2003/08/13 18:52:01 nathanw Exp $");
41 
42 #include <err.h>
43 #include <errno.h>
44 #include <lwp.h>
45 #include <signal.h>
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <string.h>
49 #include <syslog.h>
50 #include <ucontext.h>
51 #include <unistd.h>
52 
53 #include <sched.h>
54 #include "pthread.h"
55 #include "pthread_int.h"
56 
57 #ifdef PTHREAD_MAIN_DEBUG
58 #define SDPRINTF(x) DPRINTF(x)
59 #else
60 #define SDPRINTF(x)
61 #endif
62 
63 static void	pthread__create_tramp(void *(*start)(void *), void *arg);
64 
65 int pthread__started;
66 
67 pthread_spin_t pthread__allqueue_lock;
68 struct pthread_queue_t pthread__allqueue;
69 
70 pthread_spin_t pthread__deadqueue_lock;
71 struct pthread_queue_t pthread__deadqueue;
72 struct pthread_queue_t pthread__reidlequeue;
73 
74 static int nthreads;
75 static int nextthread;
76 static pthread_spin_t nextthread_lock;
77 static pthread_attr_t pthread_default_attr;
78 
79 enum {
80 	DIAGASSERT_ABORT =	1<<0,
81 	DIAGASSERT_STDERR =	1<<1,
82 	DIAGASSERT_SYSLOG =	1<<2
83 };
84 
85 static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;
86 
87 pthread_spin_t pthread__runqueue_lock;
88 struct pthread_queue_t pthread__runqueue;
89 struct pthread_queue_t pthread__idlequeue;
90 
91 __strong_alias(__libc_thr_self,pthread_self)
92 __strong_alias(__libc_thr_create,pthread_create)
93 __strong_alias(__libc_thr_exit,pthread_exit)
94 __strong_alias(__libc_thr_errno,pthread__errno)
95 __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
96 
/*
 * Static library kludge.  Place a reference to a symbol in any
 * library file which does not already have a reference here.
 */
101 extern int pthread__cancel_stub_binder;
102 extern int pthread__sched_binder;
103 extern struct pthread_queue_t pthread__nanosleeping;
104 
105 void *pthread__static_lib_binder[] = {
106 	&pthread__cancel_stub_binder,
107 	pthread_cond_init,
108 	pthread_mutex_init,
109 	pthread_rwlock_init,
110 	pthread_barrier_init,
111 	pthread_key_create,
112 	pthread_setspecific,
113 	&pthread__sched_binder,
114 	&pthread__nanosleeping
115 };
116 
117 /*
118  * This needs to be started by the library loading code, before main()
119  * gets to run, for various things that use the state of the initial thread
120  * to work properly (thread-specific data is an application-visible example;
121  * spinlock counts for mutexes is an internal example).
122  */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	extern int __isthreaded;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	PTQ_INIT(&pthread__reidlequeue);
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	nthreads = 1;

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* Snapshot the process's current signal mask into the main thread. */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	/*
	 * Parse PTHREAD_DIAGASSERT: each lowercase letter enables a
	 * reporting mode (a = abort, e = stderr, l = syslog) and the
	 * corresponding uppercase letter disables it.  Unrecognized
	 * characters are silently ignored.
	 */
	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}


	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
183 
184 static void
185 pthread__child_callback(void)
186 {
187 	/*
188 	 * Clean up data structures that a forked child process might
189 	 * trip over. Note that if threads have been created (causing
190 	 * this handler to be registered) the standards say that the
191 	 * child will trigger undefined behavior if it makes any
192 	 * pthread_* calls (or any other calls that aren't
193 	 * async-signal-safe), so we don't really have to clean up
194 	 * much. Anything that permits some pthread_* calls to work is
195 	 * merely being polite.
196 	 */
197 	pthread__started = 0;
198 }
199 
static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/* Create idle threads */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			/*
			 * NOTE(review): err() reports strerror(errno),
			 * but the failure code is in ret — confirm
			 * pthread__stackalloc() sets errno on failure.
			 */
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		/* Idle threads take no signals; block everything. */
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}
233 
234 
235 /* General-purpose thread data structure sanitization. */
/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	/* Hand out a unique (monotonically increasing) thread number. */
	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	/*
	 * NOTE(review): clears PTHREAD_KEYS_MAX int-sized slots —
	 * assumes pt_specific elements are int-sized; verify against
	 * the declaration in pthread_int.h.
	 */
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}
276 
277 
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	/* Validate the attribute structure (or fall back to defaults). */
	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	/*
	 * 1. Get storage: recycle a dead thread's stack if one is
	 * available; otherwise allocate a fresh one.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0)
			return ret;
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
	/* 6. Put on run queue. */
	pthread__sched(self, newthread);

	*thread = newthread;

	return 0;
}
365 
366 
/*
 * Entry point for every new thread (installed by makecontext() in
 * pthread_create()): run the user's start routine and turn its return
 * value into a pthread_exit().
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *result;

	result = (*start)(arg);

	pthread_exit(result);

	/* pthread_exit() does not return. */
	/*NOTREACHED*/
	pthread__abort();
}
379 
380 
381 /*
382  * Other threads will switch to the idle thread so that they
383  * can dispose of any awkward locks or recycle upcall state.
384  */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	/* Reuse the deadqueue lock to guard the re-idle queue as well. */
	PTQ_INSERT_TAIL(&pthread__reidlequeue, self, pt_runq);
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}
418 
419 
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt, flags;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting (status %p, flags %x, cancel %d).\n", self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	flags = self->pt_flags;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	if (flags & PT_FLAG_DETACHED) {
		/* Nobody will join us; free our own name. */
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		/* Block forever; pthread__block() releases the lock. */
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &self->pt_join_lock);
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
497 
498 
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	/* Validate the handle without dereferencing stack garbage. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	/*
	 * Remember the thread number; if the target dies and its
	 * structure is recycled for a new thread while we sleep, the
	 * number will have changed and we must report ESRCH.
	 */
	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			/* pthread_join() is a cancellation point. */
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		/* Record where we're sleeping so pthread_cancel() can find us. */
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		/* pthread__block() releases pt_join_lock; reacquire both after. */
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	/* Safe without the lock: we just marked the zombie DEAD, so it's ours. */
	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	if (name != NULL)
		free(name);

	return 0;
}
590 
591 
592 int
593 pthread_equal(pthread_t t1, pthread_t t2)
594 {
595 
596 	/* Nothing special here. */
597 	return (t1 == t2);
598 }
599 
600 
int
pthread_detach(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();

	/* Validate the handle without dereferencing stack garbage. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	/* Lock order: flaglock before join_lock (matches pthread_join()). */
	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}
633 
634 
635 int
636 pthread_getname_np(pthread_t thread, char *name, size_t len)
637 {
638 	pthread_t self;
639 
640 	self = pthread__self();
641 
642 	if (pthread__find(self, thread) != 0)
643 		return ESRCH;
644 
645 	if (thread->pt_magic != PT_MAGIC)
646 		return EINVAL;
647 
648 	pthread_spinlock(self, &thread->pt_join_lock);
649 	if (thread->pt_name == NULL)
650 		name[0] = '\0';
651 	else
652 		strlcpy(name, thread->pt_name, len);
653 	pthread_spinunlock(self, &thread->pt_join_lock);
654 
655 	return 0;
656 }
657 
658 
659 int
660 pthread_setname_np(pthread_t thread, const char *name, void *arg)
661 {
662 	pthread_t self = pthread_self();
663 	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
664 	int namelen;
665 
666 	if (pthread__find(self, thread) != 0)
667 		return ESRCH;
668 
669 	if (thread->pt_magic != PT_MAGIC)
670 		return EINVAL;
671 
672 	namelen = snprintf(newname, sizeof(newname), name, arg);
673 	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
674 		return EINVAL;
675 
676 	cp = strdup(newname);
677 	if (cp == NULL)
678 		return ENOMEM;
679 
680 	pthread_spinlock(self, &thread->pt_join_lock);
681 
682 	if (thread->pt_state == PT_STATE_DEAD) {
683 		pthread_spinunlock(self, &thread->pt_join_lock);
684 		free(cp);
685 		return EINVAL;
686 	}
687 
688 	oldname = thread->pt_name;
689 	thread->pt_name = cp;
690 
691 	pthread_spinunlock(self, &thread->pt_join_lock);
692 
693 	if (oldname != NULL)
694 		free(oldname);
695 
696 	return 0;
697 }
698 
699 
700 
/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
705 pthread_t
706 pthread_self(void)
707 {
708 
709 	return pthread__self();
710 }
711 
712 
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	/*
	 * NOTE(review): this state pre-check is done without any lock
	 * held — presumably a cheap filter for obviously-bad handles;
	 * confirm it is not relied on for correctness.
	 */
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		/* Cancellation disabled: leave only CS_PENDING recorded. */
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}
767 
768 
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	/* Report the previous state before making any change. */
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		/* Demote an armed cancel back to "pending" while disabled. */
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				/* Drop the lock before exiting for good. */
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
813 
814 
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	/* Report the previous type before making any change. */
	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		/* Async + already-armed cancel: act on it immediately. */
		if (self->pt_cancel) {
			/* Drop the lock before exiting for good. */
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
847 
848 
849 void
850 pthread_testcancel()
851 {
852 	pthread_t self;
853 
854 	self = pthread__self();
855 	if (self->pt_cancel)
856 		pthread_exit(PTHREAD_CANCELED);
857 }
858 
859 
860 /*
861  * POSIX requires that certain functions return an error rather than
862  * invoking undefined behavior even when handed completely bogus
863  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
864  * utility routine searches the list of threads for the pthread_t
865  * value without dereferencing it.
866  */
867 int
868 pthread__find(pthread_t self, pthread_t id)
869 {
870 	pthread_t target;
871 
872 	pthread_spinlock(self, &pthread__allqueue_lock);
873 	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
874 	    if (target == id)
875 		    break;
876 	pthread_spinunlock(self, &pthread__allqueue_lock);
877 
878 	if (target == NULL)
879 		return ESRCH;
880 
881 	return 0;
882 }
883 
884 
885 void
886 pthread__testcancel(pthread_t self)
887 {
888 
889 	if (self->pt_cancel)
890 		pthread_exit(PTHREAD_CANCELED);
891 }
892 
893 
894 void
895 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
896 {
897 	pthread_t self;
898 	struct pt_clean_t *entry;
899 
900 	self = pthread__self();
901 	entry = store;
902 	entry->ptc_cleanup = cleanup;
903 	entry->ptc_arg = arg;
904 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
905 }
906 
907 
908 void
909 pthread__cleanup_pop(int ex, void *store)
910 {
911 	pthread_t self;
912 	struct pt_clean_t *entry;
913 
914 	self = pthread__self();
915 	entry = store;
916 
917 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
918 	if (ex)
919 		(*entry->ptc_cleanup)(entry->ptc_arg);
920 }
921 
922 
923 int *
924 pthread__errno(void)
925 {
926 	pthread_t self;
927 
928 	self = pthread__self();
929 
930 	return &(self->pt_errno);
931 }
932 
933 ssize_t	_sys_write(int, const void *, size_t);
934 
/*
 * Backend for the library's internal assertions: format a message,
 * write it straight to stderr with the raw system call, raise
 * SIGABRT, and exit.  Never returns.
 */
void
pthread__assertfunc(char *file, int line, char *function, char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/*
	 * snprintf may fail (negative) or report truncation (return
	 * >= buffer size); clamp before using len as a write count.
	 */
	if (len < 0)
		len = 0;
	else if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}
957 
958 
959 void
960 pthread__errorfunc(char *file, int line, char *function, char *msg)
961 {
962 	char buf[1024];
963 	size_t len;
964 
965 	if (pthread__diagassert == 0)
966 		return;
967 
968 	/*
969 	 * snprintf should not acquire any locks, or we could
970 	 * end up deadlocked if the assert caller held locks.
971 	 */
972 	len = snprintf(buf, 1024,
973 	    "%s: Error detected by libpthread: %s.\n"
974 	    "Detected by file \"%s\", line %d%s%s%s.\n"
975 	    "See pthread(3) for information.\n",
976 	    getprogname(), msg, file, line,
977 	    function ? ", function \"" : "",
978 	    function ? function : "",
979 	    function ? "\"" : "");
980 
981 	if (pthread__diagassert & DIAGASSERT_STDERR)
982 		_sys_write(STDERR_FILENO, buf, len);
983 
984 	if (pthread__diagassert & DIAGASSERT_SYSLOG)
985 		syslog(LOG_DEBUG | LOG_USER, "%s", buf);
986 
987 	if (pthread__diagassert & DIAGASSERT_ABORT) {
988 		(void)kill(getpid(), SIGABRT);
989 		_exit(1);
990 	}
991 }
992