xref: /netbsd-src/lib/libpthread/pthread.c (revision 1ffa7b76c40339c17a0fb2a09fac93f287cfc046)
/*	$NetBSD: pthread.c,v 1.18 2003/04/28 17:46:30 nathanw Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.18 2003/04/28 17:46:30 nathanw Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

static void	pthread__create_tramp(void *(*start)(void *), void *arg);

int pthread__started;

pthread_spin_t pthread__allqueue_lock;
struct pthread_queue_t pthread__allqueue;

pthread_spin_t pthread__deadqueue_lock;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t pthread__reidlequeue;

static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock;
static pthread_attr_t pthread_default_attr;

#define PTHREAD_ERRORMODE_ABORT		1
#define PTHREAD_ERRORMODE_PRINT		2
#define PTHREAD_ERRORMODE_IGNORE	3
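/*
 * Which mode is in effect is chosen at startup from the PTHREAD_ERRORMODE
 * environment variable (see pthread_init() below); for example, running a
 * program as
 *	PTHREAD_ERRORMODE=print ./prog
 * reports detected usage errors on stderr instead of aborting.
 */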

static int pthread__errormode;

pthread_spin_t pthread__runqueue_lock;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)

/*
 * Static library kludge.  Place a reference to a symbol from any library
 * object file which does not already have a reference here, so that the
 * static linker is forced to pull that object file into the program.
 */
extern int pthread__cancel_stub_binder;
extern int pthread__sched_binder;
extern struct pthread_queue_t pthread__nanosleeping;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	&pthread__sched_binder,
	&pthread__nanosleeping
};

/* Private data for pthread_attr_t */
struct pthread_attr_private {
	char ptap_name[PTHREAD_MAX_NAMELEN_NP];
	void *ptap_namearg;
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes are an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *mode;
	extern int __isthreaded;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	PTQ_INIT(&pthread__reidlequeue);
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	pthread__errormode = PTHREAD_ERRORMODE_ABORT;
	if ((mode = getenv("PTHREAD_ERRORMODE")) != NULL) {
		if (strcasecmp(mode, "ignore") == 0)
			pthread__errormode = PTHREAD_ERRORMODE_IGNORE;
		else if (strcasecmp(mode, "print") == 0)
			pthread__errormode = PTHREAD_ERRORMODE_PRINT;
		else if (strcasecmp(mode, "abort") == 0)
			pthread__errormode = PTHREAD_ERRORMODE_ABORT;
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}

static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/* Create idle threads */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	nthreads = 1;
	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}


/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_sleepuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}


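/*
 * Example of the interface this implements (standard POSIX usage;
 * startfn and arg are placeholders):
 *
 *	pthread_t t;
 *	int error;
 *
 *	error = pthread_create(&t, NULL, startfn, arg);
 *	if (error != 0)
 *		errx(1, "pthread_create: %s", strerror(error));
 */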
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;

	PTHREADD_ADD(PTHREADD_CREATE);
	pthread__assert(thread != NULL);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* 1. Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0)
			return ret;
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
	/* 6. Put on run queue. */
	pthread__sched(self, newthread);

	*thread = newthread;

	return 0;
}


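/*
 * Trampoline that every new thread begins execution in.  When the start
 * routine returns, its return value is handed to pthread_exit(), giving
 * the POSIX-mandated "returning from the start routine is an implicit
 * pthread_exit()" behavior.
 */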
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = start(arg);

	pthread_exit(retval);

	/*NOTREACHED*//*CONSTCOND*/
	pthread__assert(0);
}


/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue, self, pt_runq);
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	/* CONSTCOND */
	pthread__assert(0);
}


void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting.\n", self));

	/* Disable cancellability. */
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_spinunlock(self, &self->pt_join_lock);

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*//*CONSTCOND*/
	pthread__assert(0);
	exit(1);
}


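/*
 * Example of the interface this implements (standard POSIX usage):
 *
 *	void *result;
 *	int error;
 *
 *	error = pthread_join(t, &result);
 *	if (error == 0)
 *		printf("thread returned %p\n", result);
 */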
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	num = thread->pt_num;
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	if (name != NULL)
		free(name);

	return 0;
}


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


int
pthread_detach(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


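/*
 * The name is used as a printf(3)-style format string, with arg as its
 * single argument; a plain string works too.  For example, either of
 *	pthread_setname_np(t, "io worker", NULL);
 *	pthread_setname_np(t, "worker@%p", t);
 * is acceptable.
 */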
int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self = pthread_self();
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_state == PT_STATE_DEAD) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}


static struct pthread_attr_private *
pthread__attr_init_private(pthread_attr_t *attr)
{
	struct pthread_attr_private *p;

	if ((p = attr->pta_private) != NULL)
		return p;

	p = malloc(sizeof(*p));
	if (p != NULL) {
		memset(p, 0, sizeof(*p));
		attr->pta_private = p;
	}
	return p;
}


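/*
 * Example: attribute objects are initialized, applied, and destroyed in
 * the usual POSIX pattern:
 *
 *	pthread_attr_t a;
 *
 *	pthread_attr_init(&a);
 *	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
 *	pthread_create(&t, &a, startfn, arg);
 *	pthread_attr_destroy(&a);
 */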
int
pthread_attr_init(pthread_attr_t *attr)
{

	attr->pta_magic = PT_ATTR_MAGIC;
	attr->pta_flags = 0;
	attr->pta_private = NULL;

	return 0;
}


int
pthread_attr_destroy(pthread_attr_t *attr)
{
	struct pthread_attr_private *p;

	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;

	if ((p = attr->pta_private) != NULL)
		free(p);

	return 0;
}


int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{

	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;

	*detachstate = (attr->pta_flags & PT_FLAG_DETACHED);

	return 0;
}


int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;

	switch (detachstate) {
	case PTHREAD_CREATE_JOINABLE:
		attr->pta_flags &= ~PT_FLAG_DETACHED;
		break;
	case PTHREAD_CREATE_DETACHED:
		attr->pta_flags |= PT_FLAG_DETACHED;
		break;
	default:
		return EINVAL;
	}

	return 0;
}


int
pthread_attr_setschedparam(pthread_attr_t *attr,
    const struct sched_param *param)
{

	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;

	if (param == NULL)
		return EINVAL;

	if (param->sched_priority != 0)
		return EINVAL;

	return 0;
}


int
pthread_attr_getschedparam(const pthread_attr_t *attr,
    struct sched_param *param)
{

	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;

	if (param == NULL)
		return EINVAL;

	param->sched_priority = 0;

	return 0;
}


int
pthread_attr_getname_np(const pthread_attr_t *attr, char *name, size_t len,
    void **argp)
{
	struct pthread_attr_private *p;

	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;

	if ((p = attr->pta_private) == NULL) {
		name[0] = '\0';
		if (argp != NULL)
			*argp = NULL;
	} else {
		strlcpy(name, p->ptap_name, len);
		if (argp != NULL)
			*argp = p->ptap_namearg;
	}

	return 0;
}


int
pthread_attr_setname_np(pthread_attr_t *attr, const char *name, void *arg)
{
	struct pthread_attr_private *p;
	int namelen;

	p = pthread__attr_init_private(attr);
	if (p == NULL)
		return ENOMEM;

	namelen = snprintf(p->ptap_name, PTHREAD_MAX_NAMELEN_NP, name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP) {
		p->ptap_name[0] = '\0';
		return EINVAL;
	}
	p->ptap_namearg = arg;

	return 0;
}


/*
 * XXX There should be a way for applications to use the efficient
 *  inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{
	pthread_t self;
	int flags;

	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();
	flags = thread->pt_flags;

	flags |= PT_FLAG_CS_PENDING;
	if ((flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptibly in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	}

	thread->pt_flags = flags;

	return 0;
}


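/*
 * Example: shielding a critical region from deferred cancellation, in
 * the usual POSIX pattern:
 *
 *	int oldstate;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	... region that must not be cancelled ...
 *	pthread_setcancelstate(oldstate, NULL);
 */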
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int flags;

	self = pthread__self();
	flags = self->pt_flags;

	if (oldstate != NULL) {
		if (flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE)
		flags |= PT_FLAG_CS_DISABLED;
	else if (state == PTHREAD_CANCEL_ENABLE) {
		flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (flags & PT_FLAG_CS_ASYNC)
				pthread_exit(PTHREAD_CANCELED);
		}
	} else
		return EINVAL;

	self->pt_flags = flags;

	return 0;
}


int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int flags;

	self = pthread__self();
	flags = self->pt_flags;

	if (oldtype != NULL) {
		if (flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel)
			pthread_exit(PTHREAD_CANCELED);
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		flags &= ~PT_FLAG_CS_ASYNC;
	else
		return EINVAL;

	self->pt_flags = flags;

	return 0;
}


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


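/*
 * Backing functions for the pthread_cleanup_push()/pthread_cleanup_pop()
 * macros in pthread.h; `store' points to caller-provided space (normally
 * on the caller's stack) holding the struct pt_clean_t queue entry.
 */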
void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}


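/*
 * Per-thread errno location.  In a threaded program libc resolves the
 * errno macro (via the __libc_thr_errno alias above) to the address this
 * returns, so each thread sees a private errno.
 */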
int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

void
pthread__assertfunc(char *file, int line, char *function, char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	write(STDERR_FILENO, buf, len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


void
pthread__errorfunc(char *file, int line, char *function, char *msg)
{
	char buf[1024];
	int len;

	if (pthread__errormode == PTHREAD_ERRORMODE_IGNORE)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "Error detected, file \"%s\", line %d%s%s%s: %s.\n",
	    file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "",
	    msg);

	write(STDERR_FILENO, buf, len);
	if (pthread__errormode == PTHREAD_ERRORMODE_ABORT) {
		(void)kill(getpid(), SIGABRT);

		_exit(1);
	}
}
1107