xref: /netbsd-src/lib/libpthread/pthread.c (revision 453f6b99a313f2f372963fe81f55bf6f811e3f55)
1 /*	$NetBSD: pthread.c,v 1.8 2003/01/31 04:59:40 nathanw Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <assert.h>
40 #include <err.h>
41 #include <errno.h>
42 #include <lwp.h>
43 #include <signal.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <ucontext.h>
47 #include <sys/cdefs.h>
48 
49 #include <sched.h>
50 #include "pthread.h"
51 #include "pthread_int.h"
52 
53 
54 #undef PTHREAD_MAIN_DEBUG
55 
56 #ifdef PTHREAD_MAIN_DEBUG
57 #define SDPRINTF(x) DPRINTF(x)
58 #else
59 #define SDPRINTF(x)
60 #endif
61 
62 static void	pthread__create_tramp(void *(*start)(void *), void *arg);
63 
64 int pthread__started;
65 
66 pthread_spin_t pthread__allqueue_lock;
67 struct pthread_queue_t pthread__allqueue;
68 
69 pthread_spin_t pthread__deadqueue_lock;
70 struct pthread_queue_t pthread__deadqueue;
71 struct pthread_queue_t pthread__reidlequeue;
72 
73 static int nthreads;
74 static int nextthread;
75 static pthread_spin_t nextthread_lock;
76 static pthread_attr_t pthread_default_attr;
77 
78 pthread_spin_t pthread__runqueue_lock;
79 struct pthread_queue_t pthread__runqueue;
80 struct pthread_queue_t pthread__idlequeue;
81 
82 __strong_alias(__libc_thr_self,pthread_self)
83 __strong_alias(__libc_thr_create,pthread_create)
84 __strong_alias(__libc_thr_exit,pthread_exit)
85 __strong_alias(__libc_thr_errno,pthread__errno)
86 
87 /*
88  * Static library kludge.  Place a reference to a symbol in any library
89  * file which does not already have a reference here.
90  */
91 extern int pthread__cancel_stub_binder;
92 extern int pthread__sched_binder;
93 
94 void *pthread__static_lib_binder[] = {
95 	&pthread__cancel_stub_binder,
96 	pthread_cond_init,
97 	pthread_mutex_init,
98 	pthread_rwlock_init,
99 	pthread_barrier_init,
100 	pthread_key_create,
101 	&pthread__sched_binder,
102 };
103 
104 /*
105  * This needs to be started by the library loading code, before main()
106  * gets to run, for various things that use the state of the initial thread
107  * to work properly (thread-specific data is an application-visible example;
108  * spinlock counts for mutexes are an internal example).
109  */
110 void
111 pthread_init(void)
112 {
113 	pthread_t first;
114 	extern int __isthreaded;
115 
116 	/* Initialize locks first; they're needed elsewhere. */
117 	pthread__lockprim_init();
118 
119 	/* Basic data structure setup */
120 	pthread_attr_init(&pthread_default_attr);
121 	PTQ_INIT(&pthread__allqueue);
122 	PTQ_INIT(&pthread__deadqueue);
123 	PTQ_INIT(&pthread__reidlequeue);
124 	PTQ_INIT(&pthread__runqueue);
125 	PTQ_INIT(&pthread__idlequeue);
126 
127 	/* Create the thread structure corresponding to main() */
128 	pthread__initmain(&first);
129 	pthread__initthread(first, first);
130 	first->pt_state = PT_STATE_RUNNING;
131 	sigprocmask(0, NULL, &first->pt_sigmask);
132 	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
133 
134 	/* Start subsystems */
135 	pthread__alarm_init();
136 	pthread__signal_init();
137 	PTHREAD_MD_INIT
138 #ifdef PTHREAD__DEBUG
139 	pthread__debug_init();
140 #endif
141 
142 	/* Tell libc that we're here and it should role-play accordingly. */
143 	__isthreaded = 1;
144 }
145 
146 
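/*
 * Bring the thread machinery fully up: create the idle threads and
 * start the scheduler-activations (SA) subsystem.  Called once, from
 * the first call to pthread_create().
 */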
147 static void
148 pthread__start(void)
149 {
150 	pthread_t self, idle;
151 	int i, ret;
152 
153 	self = pthread__self(); /* should be the "main()" thread */
154 
155 
156 	/* Create idle threads */
157 	for (i = 0; i < NIDLETHREADS; i++) {
158 		ret = pthread__stackalloc(&idle);
159 		if (ret != 0)
160 			err(1, "Couldn't allocate stack for idle thread!");
161 		pthread__initthread(self, idle);
162 		sigfillset(&idle->pt_sigmask);
163 		idle->pt_type = PT_THREAD_IDLE;
164 		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
165 		pthread__sched_idle(self, idle);
166 	}
167 
168 	nthreads = 1;
169 	/* Start up the SA subsystem */
170 	pthread__sa_start();
171 	SDPRINTF(("(pthread__start %p) Started.\n", self));
172 }
173 
174 
175 /* General-purpose thread data structure sanitization. */
176 void
177 pthread__initthread(pthread_t self, pthread_t t)
178 {
179 	int id;
180 
181 	pthread_spinlock(self, &nextthread_lock);
182 	id = nextthread;
183 	nextthread++;
184 	pthread_spinunlock(self, &nextthread_lock);
185 	t->pt_num = id;
186 
187 	t->pt_magic = PT_MAGIC;
188 	t->pt_type = PT_THREAD_NORMAL;
189 	t->pt_state = PT_STATE_RUNNABLE;
190 	pthread_lockinit(&t->pt_statelock);
191 	t->pt_spinlocks = 0;
192 	t->pt_next = NULL;
193 	t->pt_exitval = NULL;
194 	t->pt_flags = 0;
195 	t->pt_cancel = 0;
196 	t->pt_errno = 0;
197 	t->pt_parent = NULL;
198 	t->pt_heldlock = NULL;
199 	t->pt_switchto = NULL;
200 	t->pt_sleepuc = NULL;
201 	sigemptyset(&t->pt_siglist);
202 	sigemptyset(&t->pt_sigmask);
203 	pthread_lockinit(&t->pt_siglock);
204 	PTQ_INIT(&t->pt_joiners);
205 	pthread_lockinit(&t->pt_join_lock);
206 	PTQ_INIT(&t->pt_cleanup_stack);
207 	memset(&t->pt_specific, 0, sizeof(t->pt_specific));
208 #ifdef PTHREAD__DEBUG
209 	t->blocks = 0;
210 	t->preempts = 0;
211 	t->rescheds = 0;
212 #endif
213 }
214 
215 
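/*
 * Create a new thread: reuse a recycled thread from the dead queue if
 * one is available (otherwise allocate a fresh stack), initialize its
 * state, point its context at the trampoline, and put it on the run
 * queue.
 */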
216 int
217 pthread_create(pthread_t *thread, const pthread_attr_t *attr,
218 	    void *(*startfunc)(void *), void *arg)
219 {
220 	pthread_t self, newthread;
221 	pthread_attr_t nattr;
222 	int ret;
223 
224 	PTHREADD_ADD(PTHREADD_CREATE);
225 	assert(thread != NULL);
226 
227 	/*
228 	 * It's okay to check this without a lock because there can
229 	 * only be one thread before it becomes true.
230 	 */
231 	if (pthread__started == 0) {
232 		pthread__start();
233 		pthread__started = 1;
234 	}
235 
236 	if (attr == NULL)
237 		nattr = pthread_default_attr;
238 	else if (attr->pta_magic == PT_ATTR_MAGIC)
239 		nattr = *attr;
240 	else
241 		return EINVAL;
242 
243 
244 	self = pthread__self();
245 
246 	pthread_spinlock(self, &pthread__deadqueue_lock);
247 	if (!PTQ_EMPTY(&pthread__deadqueue)) {
248 		newthread = PTQ_FIRST(&pthread__deadqueue);
249 		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
250 		pthread_spinunlock(self, &pthread__deadqueue_lock);
251 	} else {
252 		pthread_spinunlock(self, &pthread__deadqueue_lock);
253 		/* Set up a stack and allocate space for a pthread_st. */
254 		ret = pthread__stackalloc(&newthread);
255 		if (ret != 0)
256 			return ret;
257 	}
258 
259 	/* 2. Set up state. */
260 	pthread__initthread(self, newthread);
261 	newthread->pt_flags = nattr.pta_flags;
262 	newthread->pt_sigmask = self->pt_sigmask;
263 
264 	/*
265 	 * 3. Set up context.
266 	 *
267 	 * The pt_uc pointer points to a location safely below the
268 	 * stack start; this is arranged by pthread__stackalloc().
269 	 */
270 	_INITCONTEXT_U(newthread->pt_uc);
271 	newthread->pt_uc->uc_stack = newthread->pt_stack;
272 	newthread->pt_uc->uc_link = NULL;
273 	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
274 	    startfunc, arg);
275 
276 	/* 4. Add to list of all threads. */
277 	pthread_spinlock(self, &pthread__allqueue_lock);
278 	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
279 	nthreads++;
280 	pthread_spinunlock(self, &pthread__allqueue_lock);
281 
282 	SDPRINTF(("(pthread_create %p) Created new thread %p.\n", self, newthread));
283 	/* 5. Put on run queue. */
284 	pthread__sched(self, newthread);
285 
286 	*thread = newthread;
287 
288 	return 0;
289 }
290 
291 
292 static void
293 pthread__create_tramp(void *(*start)(void *), void *arg)
294 {
295 	void *retval;
296 
297 	retval = start(arg);
298 
299 	pthread_exit(retval);
300 
301 	/*NOTREACHED*//*CONSTCOND*/
302 	assert(0);
303 }
304 
305 
306 /*
307  * Other threads will switch to the idle thread so that they
308  * can dispose of any awkward locks or recycle upcall state.
309  */
310 void
311 pthread__idle(void)
312 {
313 	pthread_t self;
314 
315 	PTHREADD_ADD(PTHREADD_IDLE);
316 	self = pthread__self();
317 	SDPRINTF(("(pthread__idle %p).\n", self));
318 
319 	/*
320 	 * The drill here is that we want to yield the processor,
321 	 * but for the thread itself to be recovered, we need to be on
322 	 * a list somewhere for the thread system to know about us.
323 	 */
324 	pthread_spinlock(self, &pthread__deadqueue_lock);
325 	PTQ_INSERT_TAIL(&pthread__reidlequeue, self, pt_runq);
326 	self->pt_flags |= PT_FLAG_IDLED;
327 	pthread_spinunlock(self, &pthread__deadqueue_lock);
328 
329 	/*
330 	 * If we get to run this, then no preemption has happened
331 	 * (because the upcall handler will not continue an idle thread with
332 	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
333 	 */
334 	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
335 	sa_yield();
336 
337 	/* NOTREACHED */
338 	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
339 	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
340 	/* CONSTCOND */
341 	assert(0);
342 }
343 
344 
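/*
 * Terminate the calling thread: run any cancellation cleanup handlers,
 * destroy thread-specific data, record the exit value, and then either
 * recycle the thread (if detached) or linger as a zombie for a joiner.
 * Exits the process when the last thread goes away.
 */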
345 void
346 pthread_exit(void *retval)
347 {
348 	pthread_t self;
349 	struct pt_clean_t *cleanup;
350 	int nt;
351 
352 	self = pthread__self();
353 	SDPRINTF(("(pthread_exit %p) Exiting.\n", self));
354 
355 	/* Disable cancellability. */
356 	self->pt_flags |= PT_FLAG_CS_DISABLED;
357 
358 	/* Call any cancellation cleanup handlers */
359 	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
360 		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
361 		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
362 		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
363 	}
364 
365 	/* Perform cleanup of thread-specific data */
366 	pthread__destroy_tsd(self);
367 
368 	self->pt_exitval = retval;
369 
370 	pthread_spinlock(self, &self->pt_join_lock);
371 	if (self->pt_flags & PT_FLAG_DETACHED) {
372 		pthread_spinunlock(self, &self->pt_join_lock);
373 
374 		pthread_spinlock(self, &pthread__allqueue_lock);
375 		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
376 		nthreads--;
377 		nt = nthreads;
378 		pthread_spinunlock(self, &pthread__allqueue_lock);
379 
380 		self->pt_state = PT_STATE_DEAD;
381 		if (nt == 0) {
382 			/* Whoah, we're the last one. Time to go. */
383 			exit(0);
384 		}
385 
386 		/* Yeah, yeah, doing work while we're dead is tacky. */
387 		pthread_spinlock(self, &pthread__deadqueue_lock);
388 		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
389 		pthread__block(self, &pthread__deadqueue_lock);
390 	} else {
391 		pthread_spinlock(self, &pthread__allqueue_lock);
392 		nthreads--;
393 		nt = nthreads;
394 		self->pt_state = PT_STATE_ZOMBIE;
395 		pthread_spinunlock(self, &pthread__allqueue_lock);
396 		if (nt == 0) {
397 			/* Whoah, we're the last one. Time to go. */
398 			exit(0);
399 		}
400 		/*
401 		 * Wake up all the potential joiners. Only one can win.
402 		 * (Can you say "Thundering Herd"? I knew you could.)
403 		 */
404 		pthread__sched_sleepers(self, &self->pt_joiners);
405 		pthread__block(self, &self->pt_join_lock);
406 	}
407 
408 	/*NOTREACHED*//*CONSTCOND*/
409 	assert(0);
410 	exit(1);
411 }
412 
413 
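/*
 * Wait for another thread to terminate and collect its exit value.
 * Sleeps on the target's joiner queue until the target becomes a
 * zombie, then moves it from the all-threads queue to the dead queue
 * for recycling.
 */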
414 int
415 pthread_join(pthread_t thread, void **valptr)
416 {
417 	pthread_t self;
418 	int num;
419 
420 	self = pthread__self();
421 	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));
422 
423 	if (pthread__find(self, thread) != 0)
424 		return ESRCH;
425 
426 	if (thread->pt_magic != PT_MAGIC)
427 		return EINVAL;
428 
429 	if (thread == self)
430 		return EDEADLK;
431 
432 	pthread_spinlock(self, &thread->pt_join_lock);
433 
434 	if (thread->pt_flags & PT_FLAG_DETACHED) {
435 		pthread_spinunlock(self, &thread->pt_join_lock);
436 		return EINVAL;
437 	}
438 
439 	num = thread->pt_num;
440 	while (thread->pt_state != PT_STATE_ZOMBIE) {
441 		if ((thread->pt_state == PT_STATE_DEAD) ||
442 		    (thread->pt_flags & PT_FLAG_DETACHED) ||
443 		    (thread->pt_num != num)) {
444 			/*
445 			 * Another thread beat us to the join, or called
446 			 * pthread_detach(). If num didn't match, the
447 			 * thread died and was recycled before we got
448 			 * another chance to run.
449 			 */
450 			pthread_spinunlock(self, &thread->pt_join_lock);
451 			return ESRCH;
452 		}
453 		/*
454 		 * "I'm not dead yet!"
455 		 * "You will be soon enough."
456 		 */
457 		pthread_spinlock(self, &self->pt_statelock);
458 		if (self->pt_cancel) {
459 			pthread_spinunlock(self, &self->pt_statelock);
460 			pthread_spinunlock(self, &thread->pt_join_lock);
461 			pthread_exit(PTHREAD_CANCELED);
462 		}
463 		self->pt_state = PT_STATE_BLOCKED_QUEUE;
464 		self->pt_sleepobj = thread;
465 		self->pt_sleepq = &thread->pt_joiners;
466 		self->pt_sleeplock = &thread->pt_join_lock;
467 		pthread_spinunlock(self, &self->pt_statelock);
468 
469 		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
470 		pthread__block(self, &thread->pt_join_lock);
471 		pthread_spinlock(self, &thread->pt_join_lock);
472 	}
473 
474 	/* All ours. */
475 	thread->pt_state = PT_STATE_DEAD;
476 	pthread_spinunlock(self, &thread->pt_join_lock);
477 
478 	if (valptr != NULL)
479 		*valptr = thread->pt_exitval;
480 
481 	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));
482 
483 	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
484 	pthread_spinlock(self, &pthread__allqueue_lock);
485 	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
486 	pthread_spinunlock(self, &pthread__allqueue_lock);
487 
488 	pthread_spinlock(self, &pthread__deadqueue_lock);
489 	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
490 	pthread_spinunlock(self, &pthread__deadqueue_lock);
491 
492 	return 0;
493 }
494 
495 
496 int
497 pthread_equal(pthread_t t1, pthread_t t2)
498 {
499 
500 	/* Nothing special here. */
501 	return (t1 == t2);
502 }
503 
504 
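/*
 * Mark a thread as detached so that its resources are reclaimed when
 * it exits, without a join; any threads already waiting to join it are
 * woken up.
 */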
505 int
506 pthread_detach(pthread_t thread)
507 {
508 	pthread_t self;
509 
510 	self = pthread__self();
511 
512 	if (pthread__find(self, thread) != 0)
513 		return ESRCH;
514 
515 	if (thread->pt_magic != PT_MAGIC)
516 		return EINVAL;
517 
518 	pthread_spinlock(self, &thread->pt_join_lock);
519 
520 	if (thread->pt_flags & PT_FLAG_DETACHED) {
521 		pthread_spinunlock(self, &thread->pt_join_lock);
522 		return EINVAL;
523 	}
524 
525 	thread->pt_flags |= PT_FLAG_DETACHED;
526 
527 	/* Any joiners have to be punted now. */
528 	pthread__sched_sleepers(self, &thread->pt_joiners);
529 
530 	pthread_spinunlock(self, &thread->pt_join_lock);
531 
532 	return 0;
533 }
534 
535 
536 int
537 pthread_attr_init(pthread_attr_t *attr)
538 {
539 
540 	attr->pta_magic = PT_ATTR_MAGIC;
541 	attr->pta_flags = 0;
542 
543 	return 0;
544 }
545 
546 
547 int
548 /*ARGSUSED*/
549 pthread_attr_destroy(pthread_attr_t *attr)
550 {
551 
552 	return 0;
553 }
554 
555 
556 int
557 pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
558 {
559 
560 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
561 		return EINVAL;
562 
563 	*detachstate = (attr->pta_flags & PT_FLAG_DETACHED);
564 
565 	return 0;
566 }
567 
568 
569 int
570 pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
571 {
572 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
573 		return EINVAL;
574 
575 	switch (detachstate) {
576 	case PTHREAD_CREATE_JOINABLE:
577 		attr->pta_flags &= ~PT_FLAG_DETACHED;
578 		break;
579 	case PTHREAD_CREATE_DETACHED:
580 		attr->pta_flags |= PT_FLAG_DETACHED;
581 		break;
582 	default:
583 		return EINVAL;
584 	}
585 
586 	return 0;
587 }
588 
589 
590 int
591 pthread_attr_setschedparam(pthread_attr_t *attr,
592     const struct sched_param *param)
593 {
594 
595 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
596 		return EINVAL;
597 
598 	if (param == NULL)
599 		return EINVAL;
600 
601 	if (param->sched_priority != 0)
602 		return EINVAL;
603 
604 	return 0;
605 }
606 
607 
608 int
609 pthread_attr_getschedparam(const pthread_attr_t *attr,
610     struct sched_param *param)
611 {
612 
613 	if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
614 		return EINVAL;
615 
616 	if (param == NULL)
617 		return EINVAL;
618 
619 	param->sched_priority = 0;
620 
621 	return 0;
622 }
623 
624 
625 /*
626  * XXX There should be a way for applications to use the efficient
627  *  inline version, but there are opacity/namespace issues.
628  */
629 pthread_t
630 pthread_self(void)
631 {
632 
633 	return pthread__self();
634 }
635 
636 
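/*
 * Request cancellation of another thread.  If the target has
 * cancellation enabled, mark it cancelled and, if it is blocked in the
 * kernel or on a sleep queue, wake it so that it notices the request;
 * otherwise the request is only recorded as pending.
 */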
637 int
638 pthread_cancel(pthread_t thread)
639 {
640 	pthread_t self;
641 	int flags;
642 
643 	if (!(thread->pt_state == PT_STATE_RUNNING ||
644 	    thread->pt_state == PT_STATE_RUNNABLE ||
645 	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
646 	    thread->pt_state == PT_STATE_BLOCKED_SYS))
647 		return ESRCH;
648 
649 	self = pthread__self();
650 	flags = thread->pt_flags;
651 
652 	flags |= PT_FLAG_CS_PENDING;
653 	if ((flags & PT_FLAG_CS_DISABLED) == 0) {
654 		thread->pt_cancel = 1;
655 		pthread_spinlock(self, &thread->pt_statelock);
656 		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
657 			/*
658 			 * It's sleeping in the kernel. If we can wake
659 			 * it up, it will notice the cancellation when
660 			 * it returns. If it doesn't wake up when we
661 			 * make this call, then it's blocked
662 			 * uninterruptibly in the kernel, and there's
663 			 * not much to be done about it.
664 			 */
665 			_lwp_wakeup(thread->pt_blockedlwp);
666 		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
667 			/*
668 			 * The thread is blocked on a sleep queue
669 			 * (pthread__block() was called). Wake it up; the
670 			 * caller of pthread__block() will check for the cancellation.
671 			 */
672 			pthread_spinlock(self, thread->pt_sleeplock);
673 			PTQ_REMOVE(thread->pt_sleepq, thread,
674 			    pt_sleep);
675 			pthread_spinunlock(self, thread->pt_sleeplock);
676 			pthread__sched(self, thread);
677 		} else {
678 			/*
679 			 * Nothing. The target thread is running and will
680 			 * notice at the next deferred cancellation point.
681 			 */
682 		}
683 		pthread_spinunlock(self, &thread->pt_statelock);
684 	}
685 
686 	thread->pt_flags = flags;
687 
688 	return 0;
689 }
690 
691 
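/*
 * Enable or disable cancellation for the calling thread.  Re-enabling
 * acts on any request that arrived while cancellation was disabled
 * (immediately, if asynchronous cancellation is in effect).
 */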
692 int
693 pthread_setcancelstate(int state, int *oldstate)
694 {
695 	pthread_t self;
696 	int flags;
697 
698 	self = pthread__self();
699 	flags = self->pt_flags;
700 
701 	if (oldstate != NULL) {
702 		if (flags & PT_FLAG_CS_DISABLED)
703 			*oldstate = PTHREAD_CANCEL_DISABLE;
704 		else
705 			*oldstate = PTHREAD_CANCEL_ENABLE;
706 	}
707 
708 	if (state == PTHREAD_CANCEL_DISABLE)
709 		flags |= PT_FLAG_CS_DISABLED;
710 	else if (state == PTHREAD_CANCEL_ENABLE) {
711 		flags &= ~PT_FLAG_CS_DISABLED;
712 		/*
713 		 * If a cancellation was requested while cancellation
714 		 * was disabled, note that fact for future
715 		 * cancellation tests.
716 		 */
717 		if (flags & PT_FLAG_CS_PENDING) {
718 			self->pt_cancel = 1;
719 			/* This is not a deferred cancellation point. */
720 			if (flags & PT_FLAG_CS_ASYNC)
721 				pthread_exit(PTHREAD_CANCELED);
722 		}
723 	} else
724 		return EINVAL;
725 
726 	self->pt_flags = flags;
727 
728 	return 0;
729 }
730 
731 
732 int
733 pthread_setcanceltype(int type, int *oldtype)
734 {
735 	pthread_t self;
736 	int flags;
737 
738 	self = pthread__self();
739 	flags = self->pt_flags;
740 
741 	if (oldtype != NULL) {
742 		if (flags & PT_FLAG_CS_ASYNC)
743 			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
744 		else
745 			*oldtype = PTHREAD_CANCEL_DEFERRED;
746 	}
747 
748 	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
749 		flags |= PT_FLAG_CS_ASYNC;
750 		if (self->pt_cancel)
751 			pthread_exit(PTHREAD_CANCELED);
752 	} else if (type == PTHREAD_CANCEL_DEFERRED)
753 		flags &= ~PT_FLAG_CS_ASYNC;
754 	else
755 		return EINVAL;
756 
757 	self->pt_flags = flags;
758 
759 	return 0;
760 }
761 
762 
763 void
764 pthread_testcancel(void)
765 {
766 	pthread_t self;
767 
768 	self = pthread__self();
769 	if (self->pt_cancel)
770 		pthread_exit(PTHREAD_CANCELED);
771 }
772 
773 
774 /*
775  * POSIX requires that certain functions return an error rather than
776  * invoking undefined behavior even when handed completely bogus
777  * pthread_t values, e.g. stack garbage or (pthread_t)666. This
778  * utility routine searches the list of threads for the pthread_t
779  * value without dereferencing it.
780  */
781 int
782 pthread__find(pthread_t self, pthread_t id)
783 {
784 	pthread_t target;
785 
786 	pthread_spinlock(self, &pthread__allqueue_lock);
787 	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
788 	    if (target == id)
789 		    break;
790 	pthread_spinunlock(self, &pthread__allqueue_lock);
791 
792 	if (target == NULL)
793 		return ESRCH;
794 
795 	return 0;
796 }
797 
798 
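/*
 * Internal cancellation-point check; unlike pthread_testcancel(), the
 * calling thread is passed in rather than looked up.
 */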
799 void
800 pthread__testcancel(pthread_t self)
801 {
802 
803 	if (self->pt_cancel)
804 		pthread_exit(PTHREAD_CANCELED);
805 }
806 
807 
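/*
 * Push a cancellation cleanup handler onto the calling thread's
 * cleanup stack.  Storage for the stack entry is supplied by the
 * caller via the `store' argument.
 */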
808 void
809 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
810 {
811 	pthread_t self;
812 	struct pt_clean_t *entry;
813 
814 	self = pthread__self();
815 	entry = store;
816 	entry->ptc_cleanup = cleanup;
817 	entry->ptc_arg = arg;
818 	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
819 }
820 
821 
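/*
 * Pop the given cleanup entry off the calling thread's cleanup stack,
 * invoking the handler if `ex' is nonzero.
 */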
822 void
823 pthread__cleanup_pop(int ex, void *store)
824 {
825 	pthread_t self;
826 	struct pt_clean_t *entry;
827 
828 	self = pthread__self();
829 	entry = store;
830 
831 	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
832 	if (ex)
833 		(*entry->ptc_cleanup)(entry->ptc_arg);
834 }
835 
836 
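/*
 * Return a pointer to the calling thread's private errno; this is
 * exported to libc as __libc_thr_errno via the strong alias above.
 */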
837 int *
838 pthread__errno(void)
839 {
840 	pthread_t self;
841 
842 	self = pthread__self();
843 
844 	return &(self->pt_errno);
845 }
846