1 /* $NetBSD: pthread.c,v 1.48 2006/04/24 18:39:36 drochner Exp $ */ 2 3 /*- 4 * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Nathan J. Williams. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 #include <sys/cdefs.h> 40 __RCSID("$NetBSD: pthread.c,v 1.48 2006/04/24 18:39:36 drochner Exp $"); 41 42 #include <err.h> 43 #include <errno.h> 44 #include <lwp.h> 45 #include <signal.h> 46 #include <stdio.h> 47 #include <stdlib.h> 48 #include <string.h> 49 #include <syslog.h> 50 #include <ucontext.h> 51 #include <unistd.h> 52 #include <sys/param.h> 53 #include <sys/sysctl.h> 54 #ifdef PTHREAD_MLOCK_KLUDGE 55 #include <sys/mman.h> 56 #endif 57 58 #include <sched.h> 59 #include "pthread.h" 60 #include "pthread_int.h" 61 62 #ifdef PTHREAD_MAIN_DEBUG 63 #define SDPRINTF(x) DPRINTF(x) 64 #else 65 #define SDPRINTF(x) 66 #endif 67 68 static void pthread__create_tramp(void *(*start)(void *), void *arg); 69 static void pthread__dead(pthread_t, pthread_t); 70 71 int pthread__started; 72 73 pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED; 74 struct pthread_queue_t pthread__allqueue; 75 76 pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED; 77 struct pthread_queue_t pthread__deadqueue; 78 struct pthread_queue_t *pthread__reidlequeue; 79 80 static int nthreads; 81 static int nextthread; 82 static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED; 83 static pthread_attr_t pthread_default_attr; 84 85 enum { 86 DIAGASSERT_ABORT = 1<<0, 87 DIAGASSERT_STDERR = 1<<1, 88 DIAGASSERT_SYSLOG = 1<<2 89 }; 90 91 static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR; 92 93 pthread_spin_t pthread__runqueue_lock = __SIMPLELOCK_UNLOCKED; 94 struct pthread_queue_t pthread__runqueue; 95 struct pthread_queue_t pthread__idlequeue; 96 struct pthread_queue_t pthread__suspqueue; 97 98 int pthread__concurrency, pthread__maxconcurrency; 99 100 int _sys___sigprocmask14(int, const sigset_t *, sigset_t *); 101 102 __strong_alias(__libc_thr_self,pthread_self) 103 __strong_alias(__libc_thr_create,pthread_create) 104 __strong_alias(__libc_thr_exit,pthread_exit) 105 __strong_alias(__libc_thr_errno,pthread__errno) 106 __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate) 107 108 /* 109 * Static library kludge. Place a reference to a symbol any library 110 * file which does not already have a reference here. 111 */ 112 extern int pthread__cancel_stub_binder; 113 extern int pthread__sched_binder; 114 extern struct pthread_queue_t pthread__nanosleeping; 115 116 void *pthread__static_lib_binder[] = { 117 &pthread__cancel_stub_binder, 118 pthread_cond_init, 119 pthread_mutex_init, 120 pthread_rwlock_init, 121 pthread_barrier_init, 122 pthread_key_create, 123 pthread_setspecific, 124 &pthread__sched_binder, 125 &pthread__nanosleeping 126 }; 127 128 /* 129 * This needs to be started by the library loading code, before main() 130 * gets to run, for various things that use the state of the initial thread 131 * to work properly (thread-specific data is an application-visible example; 132 * spinlock counts for mutexes is an internal example). 133 */ 134 void 135 pthread_init(void) 136 { 137 pthread_t first; 138 char *p; 139 int i, mib[2], ncpu; 140 size_t len; 141 extern int __isthreaded; 142 #ifdef PTHREAD_MLOCK_KLUDGE 143 int ret; 144 #endif 145 146 mib[0] = CTL_HW; 147 mib[1] = HW_NCPU; 148 149 len = sizeof(ncpu); 150 sysctl(mib, 2, &ncpu, &len, NULL, 0); 151 152 /* Initialize locks first; they're needed elsewhere. */ 153 pthread__lockprim_init(ncpu); 154 155 /* Find out requested/possible concurrency */ 156 p = getenv("PTHREAD_CONCURRENCY"); 157 pthread__maxconcurrency = p ? atoi(p) : 1; 158 159 if (pthread__maxconcurrency < 1) 160 pthread__maxconcurrency = 1; 161 if (pthread__maxconcurrency > ncpu) 162 pthread__maxconcurrency = ncpu; 163 164 /* Allocate data structures */ 165 pthread__reidlequeue = (struct pthread_queue_t *)malloc 166 (pthread__maxconcurrency * sizeof(struct pthread_queue_t)); 167 if (pthread__reidlequeue == NULL) 168 err(1, "Couldn't allocate memory for pthread__reidlequeue"); 169 170 /* Basic data structure setup */ 171 pthread_attr_init(&pthread_default_attr); 172 PTQ_INIT(&pthread__allqueue); 173 PTQ_INIT(&pthread__deadqueue); 174 #ifdef PTHREAD_MLOCK_KLUDGE 175 ret = mlock(&pthread__deadqueue, sizeof(pthread__deadqueue)); 176 pthread__assert(ret == 0); 177 #endif 178 PTQ_INIT(&pthread__runqueue); 179 PTQ_INIT(&pthread__idlequeue); 180 for (i = 0; i < pthread__maxconcurrency; i++) 181 PTQ_INIT(&pthread__reidlequeue[i]); 182 nthreads = 1; 183 184 /* Create the thread structure corresponding to main() */ 185 pthread__initmain(&first); 186 pthread__initthread(first, first); 187 first->pt_state = PT_STATE_RUNNING; 188 _sys___sigprocmask14(0, NULL, &first->pt_sigmask); 189 PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq); 190 191 /* Start subsystems */ 192 pthread__signal_init(); 193 PTHREAD_MD_INIT 194 #ifdef PTHREAD__DEBUG 195 pthread__debug_init(ncpu); 196 #endif 197 198 for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) { 199 switch (*p) { 200 case 'a': 201 pthread__diagassert |= DIAGASSERT_ABORT; 202 break; 203 case 'A': 204 pthread__diagassert &= ~DIAGASSERT_ABORT; 205 break; 206 case 'e': 207 pthread__diagassert |= DIAGASSERT_STDERR; 208 break; 209 case 'E': 210 pthread__diagassert &= ~DIAGASSERT_STDERR; 211 break; 212 case 'l': 213 pthread__diagassert |= DIAGASSERT_SYSLOG; 214 break; 215 case 'L': 216 pthread__diagassert &= ~DIAGASSERT_SYSLOG; 217 break; 218 } 219 } 220 221 222 /* Tell libc that we're here and it should role-play accordingly. */ 223 __isthreaded = 1; 224 } 225 226 static void 227 pthread__child_callback(void) 228 { 229 /* 230 * Clean up data structures that a forked child process might 231 * trip over. Note that if threads have been created (causing 232 * this handler to be registered) the standards say that the 233 * child will trigger undefined behavior if it makes any 234 * pthread_* calls (or any other calls that aren't 235 * async-signal-safe), so we don't really have to clean up 236 * much. Anything that permits some pthread_* calls to work is 237 * merely being polite. 238 */ 239 pthread__started = 0; 240 } 241 242 static void 243 pthread__start(void) 244 { 245 pthread_t self, idle; 246 int i, ret; 247 248 self = pthread__self(); /* should be the "main()" thread */ 249 250 /* 251 * Per-process timers are cleared by fork(); despite the 252 * various restrictions on fork() and threads, it's legal to 253 * fork() before creating any threads. 254 */ 255 pthread__alarm_init(); 256 257 pthread__signal_start(); 258 259 pthread_atfork(NULL, NULL, pthread__child_callback); 260 261 /* 262 * Create idle threads 263 * XXX need to create more idle threads if concurrency > 3 264 */ 265 for (i = 0; i < NIDLETHREADS; i++) { 266 ret = pthread__stackalloc(&idle); 267 if (ret != 0) 268 err(1, "Couldn't allocate stack for idle thread!"); 269 pthread__initthread(self, idle); 270 sigfillset(&idle->pt_sigmask); 271 idle->pt_type = PT_THREAD_IDLE; 272 PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq); 273 pthread__sched_idle(self, idle); 274 } 275 276 /* Start up the SA subsystem */ 277 pthread__sa_start(); 278 SDPRINTF(("(pthread__start %p) Started.\n", self)); 279 } 280 281 282 /* General-purpose thread data structure sanitization. */ 283 void 284 pthread__initthread(pthread_t self, pthread_t t) 285 { 286 int id; 287 288 pthread_spinlock(self, &nextthread_lock); 289 id = nextthread; 290 nextthread++; 291 pthread_spinunlock(self, &nextthread_lock); 292 t->pt_num = id; 293 294 t->pt_magic = PT_MAGIC; 295 t->pt_type = PT_THREAD_NORMAL; 296 t->pt_state = PT_STATE_RUNNABLE; 297 pthread_lockinit(&t->pt_statelock); 298 pthread_lockinit(&t->pt_flaglock); 299 t->pt_spinlocks = 0; 300 t->pt_next = NULL; 301 t->pt_exitval = NULL; 302 t->pt_flags = 0; 303 t->pt_cancel = 0; 304 t->pt_errno = 0; 305 t->pt_parent = NULL; 306 t->pt_heldlock = NULL; 307 t->pt_switchto = NULL; 308 t->pt_trapuc = NULL; 309 sigemptyset(&t->pt_siglist); 310 sigemptyset(&t->pt_sigmask); 311 pthread_lockinit(&t->pt_siglock); 312 PTQ_INIT(&t->pt_joiners); 313 pthread_lockinit(&t->pt_join_lock); 314 PTQ_INIT(&t->pt_cleanup_stack); 315 memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX); 316 t->pt_name = NULL; 317 #ifdef PTHREAD__DEBUG 318 t->blocks = 0; 319 t->preempts = 0; 320 t->rescheds = 0; 321 #endif 322 } 323 324 325 int 326 pthread_create(pthread_t *thread, const pthread_attr_t *attr, 327 void *(*startfunc)(void *), void *arg) 328 { 329 pthread_t self, newthread; 330 pthread_attr_t nattr; 331 struct pthread_attr_private *p; 332 char *name; 333 int ret; 334 335 PTHREADD_ADD(PTHREADD_CREATE); 336 337 /* 338 * It's okay to check this without a lock because there can 339 * only be one thread before it becomes true. 340 */ 341 if (pthread__started == 0) { 342 pthread__start(); 343 pthread__started = 1; 344 } 345 346 if (attr == NULL) 347 nattr = pthread_default_attr; 348 else if (attr->pta_magic == PT_ATTR_MAGIC) 349 nattr = *attr; 350 else 351 return EINVAL; 352 353 /* Fetch misc. attributes from the attr structure. */ 354 name = NULL; 355 if ((p = nattr.pta_private) != NULL) 356 if (p->ptap_name[0] != '\0') 357 if ((name = strdup(p->ptap_name)) == NULL) 358 return ENOMEM; 359 360 self = pthread__self(); 361 362 pthread_spinlock(self, &pthread__deadqueue_lock); 363 if (!PTQ_EMPTY(&pthread__deadqueue)) { 364 newthread = PTQ_FIRST(&pthread__deadqueue); 365 PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq); 366 pthread_spinunlock(self, &pthread__deadqueue_lock); 367 } else { 368 pthread_spinunlock(self, &pthread__deadqueue_lock); 369 /* Set up a stack and allocate space for a pthread_st. */ 370 ret = pthread__stackalloc(&newthread); 371 if (ret != 0) { 372 if (name) 373 free(name); 374 return ret; 375 } 376 } 377 378 /* 2. Set up state. */ 379 pthread__initthread(self, newthread); 380 newthread->pt_flags = nattr.pta_flags; 381 newthread->pt_sigmask = self->pt_sigmask; 382 383 /* 3. Set up misc. attributes. */ 384 newthread->pt_name = name; 385 386 /* 387 * 4. Set up context. 388 * 389 * The pt_uc pointer points to a location safely below the 390 * stack start; this is arranged by pthread__stackalloc(). 391 */ 392 _INITCONTEXT_U(newthread->pt_uc); 393 #ifdef PTHREAD_MACHINE_HAS_ID_REGISTER 394 pthread__uc_id(newthread->pt_uc) = newthread; 395 #endif 396 newthread->pt_uc->uc_stack = newthread->pt_stack; 397 newthread->pt_uc->uc_link = NULL; 398 makecontext(newthread->pt_uc, pthread__create_tramp, 2, 399 startfunc, arg); 400 401 /* 5. Add to list of all threads. */ 402 pthread_spinlock(self, &pthread__allqueue_lock); 403 PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq); 404 nthreads++; 405 pthread_spinunlock(self, &pthread__allqueue_lock); 406 407 SDPRINTF(("(pthread_create %p) new thread %p (name pointer %p).\n", 408 self, newthread, newthread->pt_name)); 409 /* 6. Put on appropriate queue. */ 410 if (newthread->pt_flags & PT_FLAG_SUSPENDED) { 411 pthread_spinlock(self, &newthread->pt_statelock); 412 pthread__suspend(self, newthread); 413 pthread_spinunlock(self, &newthread->pt_statelock); 414 } else 415 pthread__sched(self, newthread); 416 417 *thread = newthread; 418 419 return 0; 420 } 421 422 423 static void 424 pthread__create_tramp(void *(*start)(void *), void *arg) 425 { 426 void *retval; 427 428 retval = (*start)(arg); 429 430 pthread_exit(retval); 431 432 /*NOTREACHED*/ 433 pthread__abort(); 434 } 435 436 int 437 pthread_suspend_np(pthread_t thread) 438 { 439 pthread_t self; 440 441 self = pthread__self(); 442 if (self == thread) { 443 return EDEADLK; 444 } 445 #ifdef ERRORCHECK 446 if (pthread__find(self, thread) != 0) 447 return ESRCH; 448 #endif 449 SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n", 450 self, thread, thread->pt_state)); 451 pthread_spinlock(self, &thread->pt_statelock); 452 if (thread->pt_blockgen != thread->pt_unblockgen) { 453 /* XXX flaglock? */ 454 thread->pt_flags |= PT_FLAG_SUSPENDED; 455 pthread_spinunlock(self, &thread->pt_statelock); 456 return 0; 457 } 458 switch (thread->pt_state) { 459 case PT_STATE_RUNNING: 460 pthread__abort(); /* XXX */ 461 break; 462 case PT_STATE_SUSPENDED: 463 pthread_spinunlock(self, &thread->pt_statelock); 464 return 0; 465 case PT_STATE_RUNNABLE: 466 pthread_spinlock(self, &pthread__runqueue_lock); 467 PTQ_REMOVE(&pthread__runqueue, thread, pt_runq); 468 pthread_spinunlock(self, &pthread__runqueue_lock); 469 break; 470 case PT_STATE_BLOCKED_QUEUE: 471 pthread_spinlock(self, thread->pt_sleeplock); 472 PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep); 473 pthread_spinunlock(self, thread->pt_sleeplock); 474 break; 475 case PT_STATE_ZOMBIE: 476 goto out; 477 default: 478 break; /* XXX */ 479 } 480 pthread__suspend(self, thread); 481 482 out: 483 pthread_spinunlock(self, &thread->pt_statelock); 484 return 0; 485 } 486 487 int 488 pthread_resume_np(pthread_t thread) 489 { 490 pthread_t self; 491 492 self = pthread__self(); 493 #ifdef ERRORCHECK 494 if (pthread__find(self, thread) != 0) 495 return ESRCH; 496 #endif 497 SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n", 498 self, thread, thread->pt_state)); 499 pthread_spinlock(self, &thread->pt_statelock); 500 /* XXX flaglock? */ 501 thread->pt_flags &= ~PT_FLAG_SUSPENDED; 502 if (thread->pt_state == PT_STATE_SUSPENDED) { 503 pthread_spinlock(self, &pthread__runqueue_lock); 504 PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq); 505 pthread_spinunlock(self, &pthread__runqueue_lock); 506 pthread__sched(self, thread); 507 } 508 pthread_spinunlock(self, &thread->pt_statelock); 509 return 0; 510 } 511 512 513 /* 514 * Other threads will switch to the idle thread so that they 515 * can dispose of any awkward locks or recycle upcall state. 516 */ 517 void 518 pthread__idle(void) 519 { 520 pthread_t self; 521 522 PTHREADD_ADD(PTHREADD_IDLE); 523 self = pthread__self(); 524 SDPRINTF(("(pthread__idle %p).\n", self)); 525 526 /* 527 * The drill here is that we want to yield the processor, 528 * but for the thread itself to be recovered, we need to be on 529 * a list somewhere for the thread system to know about us. 530 */ 531 pthread_spinlock(self, &pthread__deadqueue_lock); 532 PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq); 533 pthread__concurrency--; 534 SDPRINTF(("(yield %p concurrency) now %d\n", self, 535 pthread__concurrency)); 536 /* Don't need a flag lock; nothing else has a handle on this thread */ 537 self->pt_flags |= PT_FLAG_IDLED; 538 pthread_spinunlock(self, &pthread__deadqueue_lock); 539 540 /* 541 * If we get to run this, then no preemption has happened 542 * (because the upcall handler will not continue an idle thread with 543 * PT_FLAG_IDLED set), and so we can yield the processor safely. 544 */ 545 SDPRINTF(("(pthread__idle %p) yielding.\n", self)); 546 sa_yield(); 547 548 /* NOTREACHED */ 549 self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */ 550 SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self)); 551 pthread__abort(); 552 } 553 554 555 void 556 pthread_exit(void *retval) 557 { 558 pthread_t self; 559 struct pt_clean_t *cleanup; 560 char *name; 561 int nt; 562 563 self = pthread__self(); 564 SDPRINTF(("(pthread_exit %p) status %p, flags %x, cancel %d\n", 565 self, retval, self->pt_flags, self->pt_cancel)); 566 567 /* Disable cancellability. */ 568 pthread_spinlock(self, &self->pt_flaglock); 569 self->pt_flags |= PT_FLAG_CS_DISABLED; 570 self->pt_cancel = 0; 571 pthread_spinunlock(self, &self->pt_flaglock); 572 573 /* Call any cancellation cleanup handlers */ 574 while (!PTQ_EMPTY(&self->pt_cleanup_stack)) { 575 cleanup = PTQ_FIRST(&self->pt_cleanup_stack); 576 PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next); 577 (*cleanup->ptc_cleanup)(cleanup->ptc_arg); 578 } 579 580 /* Perform cleanup of thread-specific data */ 581 pthread__destroy_tsd(self); 582 583 self->pt_exitval = retval; 584 585 /* 586 * it's safe to check PT_FLAG_DETACHED without pt_flaglock 587 * because it's only set by pthread_detach with pt_join_lock held. 588 */ 589 pthread_spinlock(self, &self->pt_join_lock); 590 if (self->pt_flags & PT_FLAG_DETACHED) { 591 self->pt_state = PT_STATE_DEAD; 592 pthread_spinunlock(self, &self->pt_join_lock); 593 name = self->pt_name; 594 self->pt_name = NULL; 595 596 if (name != NULL) 597 free(name); 598 599 pthread_spinlock(self, &pthread__allqueue_lock); 600 PTQ_REMOVE(&pthread__allqueue, self, pt_allq); 601 nthreads--; 602 nt = nthreads; 603 pthread_spinunlock(self, &pthread__allqueue_lock); 604 605 if (nt == 0) { 606 /* Whoah, we're the last one. Time to go. */ 607 exit(0); 608 } 609 610 /* Yeah, yeah, doing work while we're dead is tacky. */ 611 pthread_spinlock(self, &pthread__deadqueue_lock); 612 PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq); 613 pthread__block(self, &pthread__deadqueue_lock); 614 SDPRINTF(("(pthread_exit %p) walking dead\n", self)); 615 } else { 616 self->pt_state = PT_STATE_ZOMBIE; 617 /* Note: name will be freed by the joiner. */ 618 pthread_spinlock(self, &pthread__allqueue_lock); 619 nthreads--; 620 nt = nthreads; 621 pthread_spinunlock(self, &pthread__allqueue_lock); 622 if (nt == 0) { 623 /* Whoah, we're the last one. Time to go. */ 624 exit(0); 625 } 626 /* 627 * Wake up all the potential joiners. Only one can win. 628 * (Can you say "Thundering Herd"? I knew you could.) 629 */ 630 pthread__sched_sleepers(self, &self->pt_joiners); 631 pthread__block(self, &self->pt_join_lock); 632 SDPRINTF(("(pthread_exit %p) walking zombie\n", self)); 633 } 634 635 /*NOTREACHED*/ 636 pthread__abort(); 637 exit(1); 638 } 639 640 641 int 642 pthread_join(pthread_t thread, void **valptr) 643 { 644 pthread_t self; 645 char *name; 646 int num; 647 648 self = pthread__self(); 649 SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread)); 650 651 if (pthread__find(self, thread) != 0) 652 return ESRCH; 653 654 if (thread->pt_magic != PT_MAGIC) 655 return EINVAL; 656 657 if (thread == self) 658 return EDEADLK; 659 660 pthread_spinlock(self, &thread->pt_flaglock); 661 662 if (thread->pt_flags & PT_FLAG_DETACHED) { 663 pthread_spinunlock(self, &thread->pt_flaglock); 664 return EINVAL; 665 } 666 667 num = thread->pt_num; 668 pthread_spinlock(self, &thread->pt_join_lock); 669 while (thread->pt_state != PT_STATE_ZOMBIE) { 670 if ((thread->pt_state == PT_STATE_DEAD) || 671 (thread->pt_flags & PT_FLAG_DETACHED) || 672 (thread->pt_num != num)) { 673 /* 674 * Another thread beat us to the join, or called 675 * pthread_detach(). If num didn't match, the 676 * thread died and was recycled before we got 677 * another chance to run. 678 */ 679 pthread_spinunlock(self, &thread->pt_join_lock); 680 pthread_spinunlock(self, &thread->pt_flaglock); 681 return ESRCH; 682 } 683 /* 684 * "I'm not dead yet!" 685 * "You will be soon enough." 686 */ 687 pthread_spinunlock(self, &thread->pt_flaglock); 688 pthread_spinlock(self, &self->pt_statelock); 689 if (self->pt_cancel) { 690 pthread_spinunlock(self, &self->pt_statelock); 691 pthread_spinunlock(self, &thread->pt_join_lock); 692 pthread_exit(PTHREAD_CANCELED); 693 } 694 self->pt_state = PT_STATE_BLOCKED_QUEUE; 695 self->pt_sleepobj = thread; 696 self->pt_sleepq = &thread->pt_joiners; 697 self->pt_sleeplock = &thread->pt_join_lock; 698 pthread_spinunlock(self, &self->pt_statelock); 699 700 PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep); 701 pthread__block(self, &thread->pt_join_lock); 702 pthread_spinlock(self, &thread->pt_flaglock); 703 pthread_spinlock(self, &thread->pt_join_lock); 704 } 705 706 /* All ours. */ 707 thread->pt_state = PT_STATE_DEAD; 708 name = thread->pt_name; 709 thread->pt_name = NULL; 710 pthread_spinunlock(self, &thread->pt_join_lock); 711 pthread_spinunlock(self, &thread->pt_flaglock); 712 713 if (valptr != NULL) 714 *valptr = thread->pt_exitval; 715 716 SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread)); 717 718 pthread__dead(self, thread); 719 720 if (name != NULL) 721 free(name); 722 723 return 0; 724 } 725 726 727 int 728 pthread_equal(pthread_t t1, pthread_t t2) 729 { 730 731 /* Nothing special here. */ 732 return (t1 == t2); 733 } 734 735 736 int 737 pthread_detach(pthread_t thread) 738 { 739 pthread_t self; 740 int doreclaim = 0; 741 char *name = NULL; 742 743 self = pthread__self(); 744 745 if (pthread__find(self, thread) != 0) 746 return ESRCH; 747 748 if (thread->pt_magic != PT_MAGIC) 749 return EINVAL; 750 751 pthread_spinlock(self, &thread->pt_flaglock); 752 pthread_spinlock(self, &thread->pt_join_lock); 753 754 if (thread->pt_flags & PT_FLAG_DETACHED) { 755 pthread_spinunlock(self, &thread->pt_join_lock); 756 pthread_spinunlock(self, &thread->pt_flaglock); 757 return EINVAL; 758 } 759 760 thread->pt_flags |= PT_FLAG_DETACHED; 761 762 /* Any joiners have to be punted now. */ 763 pthread__sched_sleepers(self, &thread->pt_joiners); 764 765 if (thread->pt_state == PT_STATE_ZOMBIE) { 766 thread->pt_state = PT_STATE_DEAD; 767 name = thread->pt_name; 768 thread->pt_name = NULL; 769 doreclaim = 1; 770 } 771 772 pthread_spinunlock(self, &thread->pt_join_lock); 773 pthread_spinunlock(self, &thread->pt_flaglock); 774 775 if (doreclaim) { 776 pthread__dead(self, thread); 777 if (name != NULL) 778 free(name); 779 } 780 781 return 0; 782 } 783 784 785 static void 786 pthread__dead(pthread_t self, pthread_t thread) 787 { 788 789 SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread)); 790 pthread__assert(thread != self); 791 pthread__assert(thread->pt_state == PT_STATE_DEAD); 792 pthread__assert(thread->pt_name == NULL); 793 794 /* Cleanup time. Move the dead thread from allqueue to the deadqueue */ 795 pthread_spinlock(self, &pthread__allqueue_lock); 796 PTQ_REMOVE(&pthread__allqueue, thread, pt_allq); 797 pthread_spinunlock(self, &pthread__allqueue_lock); 798 799 pthread_spinlock(self, &pthread__deadqueue_lock); 800 PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq); 801 pthread_spinunlock(self, &pthread__deadqueue_lock); 802 } 803 804 805 int 806 pthread_getname_np(pthread_t thread, char *name, size_t len) 807 { 808 pthread_t self; 809 810 self = pthread__self(); 811 812 if (pthread__find(self, thread) != 0) 813 return ESRCH; 814 815 if (thread->pt_magic != PT_MAGIC) 816 return EINVAL; 817 818 pthread_spinlock(self, &thread->pt_join_lock); 819 if (thread->pt_name == NULL) 820 name[0] = '\0'; 821 else 822 strlcpy(name, thread->pt_name, len); 823 pthread_spinunlock(self, &thread->pt_join_lock); 824 825 return 0; 826 } 827 828 829 int 830 pthread_setname_np(pthread_t thread, const char *name, void *arg) 831 { 832 pthread_t self; 833 char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP]; 834 int namelen; 835 836 self = pthread__self(); 837 if (pthread__find(self, thread) != 0) 838 return ESRCH; 839 840 if (thread->pt_magic != PT_MAGIC) 841 return EINVAL; 842 843 namelen = snprintf(newname, sizeof(newname), name, arg); 844 if (namelen >= PTHREAD_MAX_NAMELEN_NP) 845 return EINVAL; 846 847 cp = strdup(newname); 848 if (cp == NULL) 849 return ENOMEM; 850 851 pthread_spinlock(self, &thread->pt_join_lock); 852 853 if (thread->pt_state == PT_STATE_DEAD) { 854 pthread_spinunlock(self, &thread->pt_join_lock); 855 free(cp); 856 return EINVAL; 857 } 858 859 oldname = thread->pt_name; 860 thread->pt_name = cp; 861 862 pthread_spinunlock(self, &thread->pt_join_lock); 863 864 if (oldname != NULL) 865 free(oldname); 866 867 return 0; 868 } 869 870 871 872 /* 873 * XXX There should be a way for applications to use the efficent 874 * inline version, but there are opacity/namespace issues. 875 */ 876 pthread_t 877 pthread_self(void) 878 { 879 880 return pthread__self(); 881 } 882 883 884 int 885 pthread_cancel(pthread_t thread) 886 { 887 pthread_t self; 888 889 self = pthread__self(); 890 #ifdef ERRORCHECK 891 if (pthread__find(self, thread) != 0) 892 return ESRCH; 893 #endif 894 if (!(thread->pt_state == PT_STATE_RUNNING || 895 thread->pt_state == PT_STATE_RUNNABLE || 896 thread->pt_state == PT_STATE_BLOCKED_QUEUE)) 897 return ESRCH; 898 899 pthread_spinlock(self, &thread->pt_flaglock); 900 thread->pt_flags |= PT_FLAG_CS_PENDING; 901 if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) { 902 thread->pt_cancel = 1; 903 pthread_spinunlock(self, &thread->pt_flaglock); 904 pthread_spinlock(self, &thread->pt_statelock); 905 if (thread->pt_blockgen != thread->pt_unblockgen) { 906 /* 907 * It's sleeping in the kernel. If we can wake 908 * it up, it will notice the cancellation when 909 * it returns. If it doesn't wake up when we 910 * make this call, then it's blocked 911 * uninterruptably in the kernel, and there's 912 * not much to be done about it. 913 */ 914 _lwp_wakeup(thread->pt_blockedlwp); 915 } else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) { 916 /* 917 * We're blocked somewhere (pthread__block() 918 * was called). Cause it to wake up; it will 919 * check for the cancellation if the routine 920 * is a cancellation point, and loop and reblock 921 * otherwise. 922 */ 923 pthread_spinlock(self, thread->pt_sleeplock); 924 PTQ_REMOVE(thread->pt_sleepq, thread, 925 pt_sleep); 926 pthread_spinunlock(self, thread->pt_sleeplock); 927 pthread__sched(self, thread); 928 } else { 929 /* 930 * Nothing. The target thread is running and will 931 * notice at the next deferred cancellation point. 932 */ 933 } 934 pthread_spinunlock(self, &thread->pt_statelock); 935 } else 936 pthread_spinunlock(self, &thread->pt_flaglock); 937 938 return 0; 939 } 940 941 942 int 943 pthread_setcancelstate(int state, int *oldstate) 944 { 945 pthread_t self; 946 int retval; 947 948 self = pthread__self(); 949 retval = 0; 950 951 pthread_spinlock(self, &self->pt_flaglock); 952 if (oldstate != NULL) { 953 if (self->pt_flags & PT_FLAG_CS_DISABLED) 954 *oldstate = PTHREAD_CANCEL_DISABLE; 955 else 956 *oldstate = PTHREAD_CANCEL_ENABLE; 957 } 958 959 if (state == PTHREAD_CANCEL_DISABLE) { 960 self->pt_flags |= PT_FLAG_CS_DISABLED; 961 if (self->pt_cancel) { 962 self->pt_flags |= PT_FLAG_CS_PENDING; 963 self->pt_cancel = 0; 964 } 965 } else if (state == PTHREAD_CANCEL_ENABLE) { 966 self->pt_flags &= ~PT_FLAG_CS_DISABLED; 967 /* 968 * If a cancellation was requested while cancellation 969 * was disabled, note that fact for future 970 * cancellation tests. 971 */ 972 if (self->pt_flags & PT_FLAG_CS_PENDING) { 973 self->pt_cancel = 1; 974 /* This is not a deferred cancellation point. */ 975 if (self->pt_flags & PT_FLAG_CS_ASYNC) { 976 pthread_spinunlock(self, &self->pt_flaglock); 977 pthread_exit(PTHREAD_CANCELED); 978 } 979 } 980 } else 981 retval = EINVAL; 982 983 pthread_spinunlock(self, &self->pt_flaglock); 984 return retval; 985 } 986 987 988 int 989 pthread_setcanceltype(int type, int *oldtype) 990 { 991 pthread_t self; 992 int retval; 993 994 self = pthread__self(); 995 retval = 0; 996 997 pthread_spinlock(self, &self->pt_flaglock); 998 999 if (oldtype != NULL) { 1000 if (self->pt_flags & PT_FLAG_CS_ASYNC) 1001 *oldtype = PTHREAD_CANCEL_ASYNCHRONOUS; 1002 else 1003 *oldtype = PTHREAD_CANCEL_DEFERRED; 1004 } 1005 1006 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) { 1007 self->pt_flags |= PT_FLAG_CS_ASYNC; 1008 if (self->pt_cancel) { 1009 pthread_spinunlock(self, &self->pt_flaglock); 1010 pthread_exit(PTHREAD_CANCELED); 1011 } 1012 } else if (type == PTHREAD_CANCEL_DEFERRED) 1013 self->pt_flags &= ~PT_FLAG_CS_ASYNC; 1014 else 1015 retval = EINVAL; 1016 1017 pthread_spinunlock(self, &self->pt_flaglock); 1018 return retval; 1019 } 1020 1021 1022 void 1023 pthread_testcancel() 1024 { 1025 pthread_t self; 1026 1027 self = pthread__self(); 1028 if (self->pt_cancel) 1029 pthread_exit(PTHREAD_CANCELED); 1030 } 1031 1032 1033 /* 1034 * POSIX requires that certain functions return an error rather than 1035 * invoking undefined behavior even when handed completely bogus 1036 * pthread_t values, e.g. stack garbage or (pthread_t)666. This 1037 * utility routine searches the list of threads for the pthread_t 1038 * value without dereferencing it. 1039 */ 1040 int 1041 pthread__find(pthread_t self, pthread_t id) 1042 { 1043 pthread_t target; 1044 1045 pthread_spinlock(self, &pthread__allqueue_lock); 1046 PTQ_FOREACH(target, &pthread__allqueue, pt_allq) 1047 if (target == id) 1048 break; 1049 pthread_spinunlock(self, &pthread__allqueue_lock); 1050 1051 if (target == NULL) 1052 return ESRCH; 1053 1054 return 0; 1055 } 1056 1057 1058 void 1059 pthread__testcancel(pthread_t self) 1060 { 1061 1062 if (self->pt_cancel) 1063 pthread_exit(PTHREAD_CANCELED); 1064 } 1065 1066 1067 void 1068 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store) 1069 { 1070 pthread_t self; 1071 struct pt_clean_t *entry; 1072 1073 self = pthread__self(); 1074 entry = store; 1075 entry->ptc_cleanup = cleanup; 1076 entry->ptc_arg = arg; 1077 PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next); 1078 } 1079 1080 1081 void 1082 pthread__cleanup_pop(int ex, void *store) 1083 { 1084 pthread_t self; 1085 struct pt_clean_t *entry; 1086 1087 self = pthread__self(); 1088 entry = store; 1089 1090 PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next); 1091 if (ex) 1092 (*entry->ptc_cleanup)(entry->ptc_arg); 1093 } 1094 1095 1096 int * 1097 pthread__errno(void) 1098 { 1099 pthread_t self; 1100 1101 self = pthread__self(); 1102 1103 return &(self->pt_errno); 1104 } 1105 1106 ssize_t _sys_write(int, const void *, size_t); 1107 1108 void 1109 pthread__assertfunc(const char *file, int line, const char *function, 1110 const char *expr) 1111 { 1112 char buf[1024]; 1113 int len; 1114 1115 SDPRINTF(("(af)\n")); 1116 1117 /* 1118 * snprintf should not acquire any locks, or we could 1119 * end up deadlocked if the assert caller held locks. 1120 */ 1121 len = snprintf(buf, 1024, 1122 "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n", 1123 expr, file, line, 1124 function ? ", function \"" : "", 1125 function ? function : "", 1126 function ? "\"" : ""); 1127 1128 _sys_write(STDERR_FILENO, buf, (size_t)len); 1129 (void)kill(getpid(), SIGABRT); 1130 1131 _exit(1); 1132 } 1133 1134 1135 void 1136 pthread__errorfunc(const char *file, int line, const char *function, 1137 const char *msg) 1138 { 1139 char buf[1024]; 1140 size_t len; 1141 1142 if (pthread__diagassert == 0) 1143 return; 1144 1145 /* 1146 * snprintf should not acquire any locks, or we could 1147 * end up deadlocked if the assert caller held locks. 1148 */ 1149 len = snprintf(buf, 1024, 1150 "%s: Error detected by libpthread: %s.\n" 1151 "Detected by file \"%s\", line %d%s%s%s.\n" 1152 "See pthread(3) for information.\n", 1153 getprogname(), msg, file, line, 1154 function ? ", function \"" : "", 1155 function ? function : "", 1156 function ? "\"" : ""); 1157 1158 if (pthread__diagassert & DIAGASSERT_STDERR) 1159 _sys_write(STDERR_FILENO, buf, len); 1160 1161 if (pthread__diagassert & DIAGASSERT_SYSLOG) 1162 syslog(LOG_DEBUG | LOG_USER, "%s", buf); 1163 1164 if (pthread__diagassert & DIAGASSERT_ABORT) { 1165 (void)kill(getpid(), SIGABRT); 1166 _exit(1); 1167 } 1168 } 1169