xref: /netbsd-src/lib/libpthread/pthread.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: pthread.c,v 1.22 2003/06/26 01:30:39 nathanw Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread.c,v 1.22 2003/06/26 01:30:39 nathanw Exp $");
41 
42 #include <err.h>
43 #include <errno.h>
44 #include <lwp.h>
45 #include <signal.h>
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <string.h>
49 #include <ucontext.h>
50 #include <unistd.h>
51 
52 #include <sched.h>
53 #include "pthread.h"
54 #include "pthread_int.h"
55 
56 #ifdef PTHREAD_MAIN_DEBUG
57 #define SDPRINTF(x) DPRINTF(x)
58 #else
59 #define SDPRINTF(x)
60 #endif
61 
/* Trampoline run at the base of every new thread's context (see below). */
static void	pthread__create_tramp(void *(*start)(void *), void *arg);

/* Nonzero once pthread__start() has run, i.e. once threads exist. */
int pthread__started;

/* Every live thread, on pt_allq; guarded by pthread__allqueue_lock. */
pthread_spin_t pthread__allqueue_lock;
struct pthread_queue_t pthread__allqueue;

/* Exited threads awaiting recycling, and idle threads to be re-queued. */
pthread_spin_t pthread__deadqueue_lock;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t pthread__reidlequeue;

/* Thread count and next thread number; nextthread_lock guards nextthread. */
static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock;
static pthread_attr_t pthread_default_attr;

/* Diagnostic policy, selected via PTHREAD_ERRORMODE (see pthread_init()). */
#define PTHREAD_ERRORMODE_ABORT		1
#define PTHREAD_ERRORMODE_PRINT       	2
#define PTHREAD_ERRORMODE_IGNORE	3

static int pthread__errormode;

/* Scheduler queues: runnable threads and available idle threads. */
pthread_spin_t pthread__runqueue_lock;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;

/* libc thread entry points alias directly to the pthread implementations. */
__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
92 
93 /*
94  * Static library kludge.  Place a reference to a symbol any library
95  * file which does not already have a reference here.
96  */
97 extern int pthread__cancel_stub_binder;
98 extern int pthread__sched_binder;
99 extern struct pthread_queue_t pthread__nanosleeping;
100 
101 void *pthread__static_lib_binder[] = {
102 	&pthread__cancel_stub_binder,
103 	pthread_cond_init,
104 	pthread_mutex_init,
105 	pthread_rwlock_init,
106 	pthread_barrier_init,
107 	pthread_key_create,
108 	&pthread__sched_binder,
109 	&pthread__nanosleeping
110 };
111 
112 /* Private data for pthread_attr_t */
113 struct pthread_attr_private {
114 	char ptap_name[PTHREAD_MAX_NAMELEN_NP];
115 	void *ptap_namearg;
116 };
117 
118 /*
119  * This needs to be started by the library loading code, before main()
120  * gets to run, for various things that use the state of the initial thread
121  * to work properly (thread-specific data is an application-visible example;
122  * spinlock counts for mutexes is an internal example).
123  */
void
pthread_init(void)
{
	pthread_t first;
	char *mode;
	extern int __isthreaded;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	PTQ_INIT(&pthread__reidlequeue);
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* Record the signal mask the process started with (how=0: query). */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	/*
	 * Diagnostic policy: abort by default, overridable with the
	 * PTHREAD_ERRORMODE environment variable ("ignore", "print",
	 * or "abort"); unrecognized values keep the default.
	 */
	pthread__errormode = PTHREAD_ERRORMODE_ABORT;
	if ((mode = getenv("PTHREAD_ERRORMODE")) != NULL) {
		if (strcasecmp(mode, "ignore") == 0)
			pthread__errormode = PTHREAD_ERRORMODE_IGNORE;
		else if (strcasecmp(mode, "print") == 0)
			pthread__errormode = PTHREAD_ERRORMODE_PRINT;
		else if (strcasecmp(mode, "abort") == 0)
			pthread__errormode = PTHREAD_ERRORMODE_ABORT;
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
169 
static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	/* Forces the next pthread_create() to restart the run-time system. */
	pthread__started = 0;
}
185 
/*
 * One-time thread-system startup, run from the first pthread_create():
 * arms the alarm subsystem, registers the fork handler, creates the
 * idle threads, and starts scheduler activations.
 */
static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/* Create idle threads */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		/* Idle threads never take signals themselves. */
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Only main() is counted; idle threads are not. */
	nthreads = 1;
	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}
220 
221 
/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	/* Hand out a unique, monotonically increasing thread number. */
	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	/* Reset all state; t may be a recycled thread structure. */
	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	/* Clear all thread-specific data slots. */
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}
262 
263 
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;

	PTHREADD_ADD(PTHREADD_CREATE);
	pthread__assert(thread != NULL);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	/* Validate the attributes; NULL means the defaults. */
	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	/* 1. Get a thread structure: recycle a dead thread or allocate. */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0)
			return ret;
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	/* The new thread inherits the creator's signal mask. */
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
	/* 6. Put on run queue. */
	pthread__sched(self, newthread);

	/* Publish the handle only after the thread is fully set up. */
	*thread = newthread;

	return 0;
}
352 
353 
/*
 * Entry trampoline planted at the base of every new thread's context:
 * run the start routine, then turn its return value into an implicit
 * pthread_exit(), exactly as if the routine had called it.
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{

	pthread_exit((*start)(arg));

	/* pthread_exit() does not return. */
	/*NOTREACHED*/
	pthread__abort();
}
366 
367 
368 /*
369  * Other threads will switch to the idle thread so that they
370  * can dispose of any awkward locks or recycle upcall state.
371  */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	/*
	 * NOTE(review): pthread__deadqueue_lock is used here to guard
	 * pthread__reidlequeue as well -- confirm that sharing is
	 * intentional.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue, self, pt_runq);
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}
404 
405 
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting.\n", self));

	/* Disable cancellability. */
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	/* Stash the result where pthread_join() can find it. */
	self->pt_exitval = retval;

	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us, so free the name here. */
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_spinunlock(self, &self->pt_join_lock);

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
481 
482 
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	/* Reject stack garbage and other bogus pthread_t values. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	/* Snapshot the thread number so recycling can be detected below. */
	num = thread->pt_num;
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			/* pthread_join() is a cancellation point. */
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		/* Sleep on the target's joiner queue; loop and reblock
		 * if woken before the target is a zombie. */
		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);

	/*
	 * NOTE(review): pt_exitval is read after dropping pt_join_lock;
	 * this relies on us being the sole successful joiner (pt_state
	 * is now PT_STATE_DEAD) -- confirm.
	 */
	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/* The joiner frees the name (see pthread_exit()). */
	if (name != NULL)
		free(name);

	return 0;
}
569 
570 
571 int
572 pthread_equal(pthread_t t1, pthread_t t2)
573 {
574 
575 	/* Nothing special here. */
576 	return (t1 == t2);
577 }
578 
579 
int
pthread_detach(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();

	/* Reject bogus pthread_t values without dereferencing them. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);

	/* A thread may only be detached once. */
	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}
609 
610 
int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	/* Reject bogus pthread_t values without dereferencing them. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	/* pt_name is guarded by the target's pt_join_lock. */
	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}
633 
634 
635 int
636 pthread_setname_np(pthread_t thread, const char *name, void *arg)
637 {
638 	pthread_t self = pthread_self();
639 	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
640 	int namelen;
641 
642 	if (pthread__find(self, thread) != 0)
643 		return ESRCH;
644 
645 	if (thread->pt_magic != PT_MAGIC)
646 		return EINVAL;
647 
648 	namelen = snprintf(newname, sizeof(newname), name, arg);
649 	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
650 		return EINVAL;
651 
652 	cp = strdup(newname);
653 	if (cp == NULL)
654 		return ENOMEM;
655 
656 	pthread_spinlock(self, &thread->pt_join_lock);
657 
658 	if (thread->pt_state == PT_STATE_DEAD) {
659 		pthread_spinunlock(self, &thread->pt_join_lock);
660 		free(cp);
661 		return EINVAL;
662 	}
663 
664 	oldname = thread->pt_name;
665 	thread->pt_name = cp;
666 
667 	pthread_spinunlock(self, &thread->pt_join_lock);
668 
669 	if (oldname != NULL)
670 		free(oldname);
671 
672 	return 0;
673 }
674 
675 
676 static struct pthread_attr_private *
677 pthread__attr_init_private(pthread_attr_t *attr)
678 {
679 	struct pthread_attr_private *p;
680 
681 	if ((p = attr->pta_private) != NULL)
682 		return p;
683 
684 	p = malloc(sizeof(*p));
685 	if (p != NULL) {
686 		memset(p, 0, sizeof(*p));
687 		attr->pta_private = p;
688 	}
689 	return p;
690 }
691 
692 
693 int
694 pthread_attr_init(pthread_attr_t *attr)
695 {
696 
697 	attr->pta_magic = PT_ATTR_MAGIC;
698 	attr->pta_flags = 0;
699 	attr->pta_private = NULL;
700 
701 	return 0;
702 }
703 
704 
705 int
706 pthread_attr_destroy(pthread_attr_t *attr)
707 {
708 	struct pthread_attr_private *p;
709 
710 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
711 		return EINVAL;
712 
713 	if ((p = attr->pta_private) != NULL)
714 		free(p);
715 
716 	return 0;
717 }
718 
719 
720 int
721 pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
722 {
723 
724 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
725 		return EINVAL;
726 
727 	*detachstate = (attr->pta_flags & PT_FLAG_DETACHED);
728 
729 	return 0;
730 }
731 
732 
733 int
734 pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
735 {
736 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
737 		return EINVAL;
738 
739 	switch (detachstate) {
740 	case PTHREAD_CREATE_JOINABLE:
741 		attr->pta_flags &= ~PT_FLAG_DETACHED;
742 		break;
743 	case PTHREAD_CREATE_DETACHED:
744 		attr->pta_flags |= PT_FLAG_DETACHED;
745 		break;
746 	default:
747 		return EINVAL;
748 	}
749 
750 	return 0;
751 }
752 
753 
754 int
755 pthread_attr_setschedparam(pthread_attr_t *attr,
756     const struct sched_param *param)
757 {
758 
759 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
760 		return EINVAL;
761 
762 	if (param == NULL)
763 		return EINVAL;
764 
765 	if (param->sched_priority != 0)
766 		return EINVAL;
767 
768 	return 0;
769 }
770 
771 
772 int
773 pthread_attr_getschedparam(const pthread_attr_t *attr,
774     struct sched_param *param)
775 {
776 
777 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
778 		return EINVAL;
779 
780 	if (param == NULL)
781 		return EINVAL;
782 
783 	param->sched_priority = 0;
784 
785 	return 0;
786 }
787 
int
pthread_attr_getstack(const pthread_attr_t *attr, void **addr, size_t *size)
{
	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;
	/*
	 * NOTE(review): reports the *calling thread's* stack rather
	 * than anything recorded in attr -- confirm this limitation
	 * is intentional.
	 */
	*addr = pthread__self()->pt_stack.ss_sp;
	*size = pthread__self()->pt_stack.ss_size;
	return 0;
}
797 
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *size)
{
	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;
	/* NOTE(review): reports the calling thread's stack size, not attr's. */
	*size = pthread__self()->pt_stack.ss_size;
	return 0;
}
806 
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **addr)
{
	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
		return EINVAL;
	/* NOTE(review): reports the calling thread's stack base, not attr's. */
	*addr = pthread__self()->pt_stack.ss_sp;
	return 0;
}
815 
816 
817 int
818 pthread_attr_getname_np(const pthread_attr_t *attr, char *name, size_t len,
819     void **argp)
820 {
821 	struct pthread_attr_private *p;
822 
823 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
824 		return EINVAL;
825 
826 	if ((p = attr->pta_private) == NULL) {
827 		name[0] = '\0';
828 		if (argp != NULL)
829 			*argp = NULL;
830 	} else {
831 		strlcpy(name, p->ptap_name, len);
832 		if (argp != NULL)
833 			*argp = p->ptap_namearg;
834 	}
835 
836 	return 0;
837 }
838 
839 
840 int
841 pthread_attr_setname_np(pthread_attr_t *attr, const char *name, void *arg)
842 {
843 	struct pthread_attr_private *p;
844 	int namelen;
845 
846 	p = pthread__attr_init_private(attr);
847 	if (p == NULL)
848 		return ENOMEM;
849 
850 	namelen = snprintf(p->ptap_name, PTHREAD_MAX_NAMELEN_NP, name, arg);
851 	if (namelen >= PTHREAD_MAX_NAMELEN_NP) {
852 		p->ptap_name[0] = '\0';
853 		return EINVAL;
854 	}
855 	p->ptap_namearg = arg;
856 
857 	return 0;
858 }
859 
860 
861 /*
862  * XXX There should be a way for applications to use the efficent
863  *  inline version, but there are opacity/namespace issues.
864  */
pthread_t
pthread_self(void)
{

	/* Hand back the calling thread's handle. */
	return pthread__self();
}
871 
872 
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;
	int flags;

	/*
	 * Only threads in an active state can be cancelled.
	 * NOTE(review): pt_state is read here before pt_statelock is
	 * taken -- confirm this pre-check is intentionally unlocked.
	 */
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();
	flags = thread->pt_flags;

	/* Record the request even if cancellation is currently disabled. */
	flags |= PT_FLAG_CS_PENDING;
	if ((flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	}

	/* Publish the updated flags (read-modify-write of pt_flags). */
	thread->pt_flags = flags;

	return 0;
}
928 
929 
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int flags;

	self = pthread__self();
	flags = self->pt_flags;

	/* Report the previous state if the caller asked for it. */
	if (oldstate != NULL) {
		if (flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE)
		flags |= PT_FLAG_CS_DISABLED;
	else if (state == PTHREAD_CANCEL_ENABLE) {
		flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (flags & PT_FLAG_CS_ASYNC)
				pthread_exit(PTHREAD_CANCELED);
		}
	} else
		return EINVAL;

	self->pt_flags = flags;

	return 0;
}
968 
969 
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int flags;

	self = pthread__self();
	flags = self->pt_flags;

	/* Report the previous type if the caller asked for it. */
	if (oldtype != NULL) {
		if (flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		flags |= PT_FLAG_CS_ASYNC;
		/* Switching to async acts on any already-pending cancel. */
		if (self->pt_cancel)
			pthread_exit(PTHREAD_CANCELED);
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		flags &= ~PT_FLAG_CS_ASYNC;
	else
		return EINVAL;

	self->pt_flags = flags;

	return 0;
}
999 
1000 
1001 void
1002 pthread_testcancel()
1003 {
1004 	pthread_t self;
1005 
1006 	self = pthread__self();
1007 	if (self->pt_cancel)
1008 		pthread_exit(PTHREAD_CANCELED);
1009 }
1010 
1011 
1012 /*
1013  * POSIX requires that certain functions return an error rather than
1014  * invoking undefined behavior even when handed completely bogus
1015  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
1016  * utility routine searches the list of threads for the pthread_t
1017  * value without dereferencing it.
1018  */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	/* Compare id against every live thread, never dereferencing it. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
	    if (target == id)
		    break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	/* PTQ_FOREACH leaves target NULL when the list is exhausted. */
	if (target == NULL)
		return ESRCH;

	return 0;
}
1035 
1036 
1037 void
1038 pthread__testcancel(pthread_t self)
1039 {
1040 
1041 	if (self->pt_cancel)
1042 		pthread_exit(PTHREAD_CANCELED);
1043 }
1044 
1045 
1046 void
1047 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
1048 {
1049 	pthread_t self;
1050 	struct pt_clean_t *entry;
1051 
1052 	self = pthread__self();
1053 	entry = store;
1054 	entry->ptc_cleanup = cleanup;
1055 	entry->ptc_arg = arg;
1056 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
1057 }
1058 
1059 
1060 void
1061 pthread__cleanup_pop(int ex, void *store)
1062 {
1063 	pthread_t self;
1064 	struct pt_clean_t *entry;
1065 
1066 	self = pthread__self();
1067 	entry = store;
1068 
1069 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
1070 	if (ex)
1071 		(*entry->ptc_cleanup)(entry->ptc_arg);
1072 }
1073 
1074 
1075 int *
1076 pthread__errno(void)
1077 {
1078 	pthread_t self;
1079 
1080 	self = pthread__self();
1081 
1082 	return &(self->pt_errno);
1083 }
1084 
/*
 * Back end of pthread__assert(): report the failed assertion on
 * stderr, raise SIGABRT, and exit.  Never returns.
 */
void
pthread__assertfunc(char *file, int line, char *function, char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, sizeof(buf),
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	/*
	 * snprintf() returns the length the message would have had,
	 * which can exceed the buffer; clamp it (and guard against a
	 * negative return) so write() never reads past buf.
	 */
	if (len < 0)
		len = 0;
	else if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;

	write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}
1107 
1108 
1109 void
1110 pthread__errorfunc(char *file, int line, char *function, char *msg)
1111 {
1112 	char buf[1024];
1113 	int len;
1114 
1115 	if (pthread__errormode == PTHREAD_ERRORMODE_IGNORE)
1116 		return;
1117 
1118 	/*
1119 	 * snprintf should not acquire any locks, or we could
1120 	 * end up deadlocked if the assert caller held locks.
1121 	 */
1122 	len = snprintf(buf, 1024,
1123 	    "Error detected, file \"%s\", line %d%s%s%s: %s.\n",
1124 	    file, line,
1125 	    function ? ", function \"" : "",
1126 	    function ? function : "",
1127 	    function ? "\"" : "",
1128 	    msg);
1129 
1130 	write(STDERR_FILENO, buf, (size_t)len);
1131 	if (pthread__errormode == PTHREAD_ERRORMODE_ABORT) {
1132 		(void)kill(getpid(), SIGABRT);
1133 
1134 		_exit(1);
1135 	}
1136 }
1137