/*	$NetBSD: pthread.c,v 1.38 2005/02/03 17:30:33 christos Exp $	*/

/*-
 * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.38 2005/02/03 17:30:33 christos Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#ifdef PTHREAD_MLOCK_KLUDGE
#include <sys/mman.h>
#endif

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

static void	pthread__create_tramp(void *(*start)(void *), void *arg);
static void	pthread__dead(pthread_t, pthread_t);

int pthread__started;

pthread_spin_t pthread__allqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__allqueue;

pthread_spin_t pthread__deadqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t *pthread__reidlequeue;

static int nthreads;
static int nextthread;
static pthread_spin_t nextthread_lock = __SIMPLELOCK_UNLOCKED;
static pthread_attr_t pthread_default_attr;

enum {
	DIAGASSERT_ABORT = 1<<0,
	DIAGASSERT_STDERR = 1<<1,
	DIAGASSERT_SYSLOG = 1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

pthread_spin_t pthread__runqueue_lock = __SIMPLELOCK_UNLOCKED;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;
struct pthread_queue_t pthread__suspqueue;

int pthread__concurrency, pthread__maxconcurrency;
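
/*
 * Queue roles, as used in this file: pthread__allqueue tracks every
 * live thread; pthread__deadqueue holds exited threads whose
 * structures and stacks pthread_create() may recycle;
 * pthread__runqueue and pthread__idlequeue feed the scheduler;
 * pthread__suspqueue holds threads stopped by pthread_suspend_np();
 * pthread__reidlequeue is a per-VP array of queues on which idle
 * threads park themselves (see pthread__idle()) until an upcall
 * recovers them.
 */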

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)

/*
 * Static library kludge. Place a reference to a symbol in any library
 * file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;
extern int pthread__sched_binder;
extern struct pthread_queue_t pthread__nanosleeping;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
	&pthread__sched_binder,
	&pthread__nanosleeping
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes are an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2], ncpu;
	size_t len;
	extern int __isthreaded;
#ifdef PTHREAD_MLOCK_KLUDGE
	int ret;
#endif

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	sysctl(mib, 2, &ncpu, &len, NULL, 0);

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init(ncpu);

	/* Find out requested/possible concurrency */
	pthread__maxconcurrency = 1;
	p = getenv("PTHREAD_CONCURRENCY");
	if (p)
		pthread__maxconcurrency = atoi(p);
	if (pthread__maxconcurrency < 1)
		pthread__maxconcurrency = 1;
	if (pthread__maxconcurrency > ncpu)
		pthread__maxconcurrency = ncpu;

	/* Allocate data structures */
	pthread__reidlequeue = (struct pthread_queue_t *)malloc
	    (pthread__maxconcurrency * sizeof(struct pthread_queue_t));
	if (pthread__reidlequeue == NULL)
		err(1, "Couldn't allocate memory for pthread__reidlequeue");

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
#ifdef PTHREAD_MLOCK_KLUDGE
	ret = mlock(&pthread__deadqueue, sizeof(pthread__deadqueue));
	pthread__assert(ret == 0);
#endif
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	for (i = 0; i < pthread__maxconcurrency; i++)
		PTQ_INIT(&pthread__reidlequeue[i]);
	nthreads = 1;

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init(ncpu);
#endif

	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}
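
/*
 * The two environment knobs parsed above, combined in one
 * illustrative invocation (shell sketch, not library code):
 *
 *	$ PTHREAD_CONCURRENCY=2 PTHREAD_DIAGASSERT=Ael ./prog
 *
 * caps concurrency at two CPUs and, for diagnostic errors, disables
 * abort ('A') while enabling both stderr ('e') and syslog ('l')
 * reporting.
 */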

static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/*
	 * Create idle threads
	 * XXX need to create more idle threads if concurrency > 3
	 */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}
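
/*
 * Note that pthread__start() runs at most once, from the first
 * pthread_create() call (see the pthread__started check below), so a
 * program that never creates a thread never pays for the idle
 * threads or the SA upcall machinery.
 */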

/* General-purpose thread data structure sanitization. */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char *name;
	int ret;	/* used unconditionally below, so not ifdef'd */

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* 1. Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
#ifdef PTHREAD_MLOCK_KLUDGE
		ret = mlock(newthread, sizeof(struct __pthread_st));
		pthread__assert(ret == 0);
#endif
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));
	/* 6. Put on appropriate queue. */
	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
		pthread_spinlock(self, &newthread->pt_statelock);
		pthread__suspend(self, newthread);
		pthread_spinunlock(self, &newthread->pt_statelock);
	} else
		pthread__sched(self, newthread);

	*thread = newthread;

	return 0;
}
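
/*
 * Usage sketch (application code, not part of the library):
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		return arg;
 *	}
 *
 *	pthread_t t;
 *	void *res;
 *	int error;
 *
 *	error = pthread_create(&t, NULL, worker, NULL);
 *	if (error)
 *		errx(1, "pthread_create: %s", strerror(error));
 *	pthread_join(t, &res);
 *
 * Note that pthread_create() reports failure through its return
 * value, not errno.
 */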

static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self = pthread__self();

	if (self == thread) {
		fprintf(stderr, "suspend_np: can't suspend self\n");
		return EDEADLK;
	}
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
	    self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	if (thread->pt_blockgen != thread->pt_unblockgen) {
		/* XXX flaglock? */
		thread->pt_flags |= PT_FLAG_SUSPENDED;
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	}
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread__abort();	/* XXX */
		break;
	case PT_STATE_SUSPENDED:
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	case PT_STATE_RUNNABLE:
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		break;
	case PT_STATE_BLOCKED_QUEUE:
		pthread_spinlock(self, thread->pt_sleeplock);
		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
		pthread_spinunlock(self, thread->pt_sleeplock);
		break;
	default:
		break;			/* XXX */
	}
	pthread__suspend(self, thread);
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}

int
pthread_resume_np(pthread_t thread)
{
	pthread_t self = pthread__self();

	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
	    self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	/* XXX flaglock? */
	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
	if (thread->pt_state == PT_STATE_SUSPENDED) {
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		pthread__sched(self, thread);
	}
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}
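
/*
 * Non-portable usage sketch for the pair above (the _np suffix marks
 * a NetBSD extension):
 *
 *	if (pthread_suspend_np(t) == 0) {
 *		... t is off the run queue; inspect shared state ...
 *		pthread_resume_np(t);
 *	}
 *
 * Suspending the calling thread itself fails with EDEADLK.
 */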

/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue[self->pt_vpid], self, pt_runq);
	pthread__concurrency--;
	SDPRINTF(("(yield %p concurrency) now %d\n", self,
	    pthread__concurrency));
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}


void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting (status %p, flags %x, cancel %d).\n", self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	/*
	 * It's safe to check PT_FLAG_DETACHED without pt_flaglock
	 * because it's only set by pthread_detach with pt_join_lock held.
	 */
	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		pthread_spinunlock(self, &self->pt_join_lock);
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
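
/*
 * Note that returning from a thread's start routine has the same
 * effect as calling pthread_exit() with that return value; see
 * pthread__create_tramp() above, which does exactly that.
 */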

int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	pthread__dead(self, thread);

	if (name != NULL)
		free(name);

	return 0;
}


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}
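
/*
 * Even though pthread_t is a pointer in this implementation and `==`
 * would work, portable application code should still compare thread
 * IDs this way (owner_thread is a hypothetical application variable):
 *
 *	if (pthread_equal(pthread_self(), owner_thread))
 *		... we already own the resource ...
 */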

int
pthread_detach(pthread_t thread)
{
	pthread_t self;
	int doreclaim = 0;
	char *name = NULL;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	if (thread->pt_state == PT_STATE_ZOMBIE) {
		thread->pt_state = PT_STATE_DEAD;
		name = thread->pt_name;
		thread->pt_name = NULL;
		doreclaim = 1;
	}

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (doreclaim) {
		pthread__dead(self, thread);
		if (name != NULL)
			free(name);
	}

	return 0;
}


static void
pthread__dead(pthread_t self, pthread_t thread)
{

	SDPRINTF(("(pthread__dead %p) Reclaimed %p.\n", self, thread));
	pthread__assert(thread != self);
	pthread__assert(thread->pt_state == PT_STATE_DEAD);
	pthread__assert(thread->pt_name == NULL);

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);
}


int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self = pthread_self();
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_state == PT_STATE_DEAD) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
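
/*
 * Usage sketch for the name calls above. Note the NetBSD-specific
 * twist visible in the snprintf() call: pthread_setname_np() treats
 * "name" as a printf(3)-style format string with "arg" as its single
 * argument.
 *
 *	char buf[PTHREAD_MAX_NAMELEN_NP];
 *
 *	pthread_setname_np(t, "worker %p", t);
 *	pthread_getname_np(t, buf, sizeof(buf));
 */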

/*
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
		return ESRCH;

	self = pthread__self();

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_blockgen != thread->pt_unblockgen) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptibly in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}


int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}
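
/*
 * Typical application pattern (sketch): shield a region that must not
 * be interrupted by cancellation, then restore the caller's setting:
 *
 *	int ocs;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ocs);
 *	... work that must run to completion ...
 *	pthread_setcancelstate(ocs, NULL);
 */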

int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
		if (target == id)
			break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}


void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
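
/*
 * These two are the runtime halves of the pthread_cleanup_push() and
 * pthread_cleanup_pop() macros. Typical application-level use
 * (sketch; unlock_fn is a hypothetical handler that unlocks the mutex
 * passed to it):
 *
 *	pthread_mutex_lock(&mtx);
 *	pthread_cleanup_push(unlock_fn, &mtx);
 *	... code containing cancellation points ...
 *	pthread_cleanup_pop(1);		(nonzero: run unlock_fn now, too)
 */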
"\"" : ""); 1108 1109 _sys_write(STDERR_FILENO, buf, (size_t)len); 1110 (void)kill(getpid(), SIGABRT); 1111 1112 _exit(1); 1113 } 1114 1115 1116 void 1117 pthread__errorfunc(const char *file, int line, const char *function, 1118 const char *msg) 1119 { 1120 char buf[1024]; 1121 size_t len; 1122 1123 if (pthread__diagassert == 0) 1124 return; 1125 1126 /* 1127 * snprintf should not acquire any locks, or we could 1128 * end up deadlocked if the assert caller held locks. 1129 */ 1130 len = snprintf(buf, 1024, 1131 "%s: Error detected by libpthread: %s.\n" 1132 "Detected by file \"%s\", line %d%s%s%s.\n" 1133 "See pthread(3) for information.\n", 1134 getprogname(), msg, file, line, 1135 function ? ", function \"" : "", 1136 function ? function : "", 1137 function ? "\"" : ""); 1138 1139 if (pthread__diagassert & DIAGASSERT_STDERR) 1140 _sys_write(STDERR_FILENO, buf, len); 1141 1142 if (pthread__diagassert & DIAGASSERT_SYSLOG) 1143 syslog(LOG_DEBUG | LOG_USER, "%s", buf); 1144 1145 if (pthread__diagassert & DIAGASSERT_ABORT) { 1146 (void)kill(getpid(), SIGABRT); 1147 _exit(1); 1148 } 1149 } 1150