/*	$NetBSD: pthread.c,v 1.32 2003/12/31 16:45:48 cl Exp $	*/

/*-
 * Copyright (c) 2001,2002,2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.32 2003/12/31 16:45:48 cl Exp $");

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>

#include <sched.h>
#include "pthread.h"
#include "pthread_int.h"

/*
 * Scheduler-path debug printf; compiles to nothing unless
 * PTHREAD_MAIN_DEBUG is defined at build time.
 */
#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

static void	pthread__create_tramp(void *(*start)(void *), void *arg);

/* Nonzero once pthread__start() has run (i.e. a second thread exists). */
int pthread__started;

/* Queue of all live threads, and the spinlock protecting it. */
pthread_spin_t pthread__allqueue_lock;
struct pthread_queue_t pthread__allqueue;

/*
 * pthread__deadqueue holds exited threads whose stacks can be recycled
 * by pthread_create(); pthread__reidlequeue holds idle threads waiting
 * to be recovered.  Both are protected by pthread__deadqueue_lock.
 */
pthread_spin_t pthread__deadqueue_lock;
struct pthread_queue_t pthread__deadqueue;
struct pthread_queue_t pthread__reidlequeue;

static int nthreads;		/* live (non-recycled) thread count */
static int nextthread;		/* next serial number for pt_num */
static pthread_spin_t nextthread_lock;
static pthread_attr_t pthread_default_attr;

/* Action bits for pthread__diagassert / the PTHREAD_DIAGASSERT env var. */
enum {
	DIAGASSERT_ABORT = 1<<0,
	DIAGASSERT_STDERR = 1<<1,
	DIAGASSERT_SYSLOG = 1<<2
};

static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;

/* Userspace scheduler queues; runqueue/suspqueue share pt_runq linkage. */
pthread_spin_t pthread__runqueue_lock;
struct pthread_queue_t pthread__runqueue;
struct pthread_queue_t pthread__idlequeue;
struct pthread_queue_t pthread__suspqueue;

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)

/*
 * Static library kludge.  Place a reference to a symbol in any library
 * file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;
extern int pthread__sched_binder;
extern struct pthread_queue_t pthread__nanosleeping;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
	&pthread__sched_binder,
	&pthread__nanosleeping
};

/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 */
void
pthread_init(void)
{
	pthread_t first;
	char *p;
	extern int __isthreaded;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	PTQ_INIT(&pthread__reidlequeue);
	PTQ_INIT(&pthread__runqueue);
	PTQ_INIT(&pthread__idlequeue);
	nthreads = 1;

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* Inherit the process signal mask into the initial thread. */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	/*
	 * Parse PTHREAD_DIAGASSERT: lowercase letters enable an action
	 * (a=abort, e=stderr, l=syslog), uppercase letters disable it.
	 * Unknown characters are silently ignored.
	 */
	for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}


	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;
}

/* fork() child handler, registered via pthread_atfork() in pthread__start(). */
static void
pthread__child_callback(void)
{
	/*
	 * Clean up data structures that a forked child process might
	 * trip over. Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much. Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

/*
 * One-time thread-subsystem startup: alarm timers, the atfork handler,
 * the idle-thread pool, and the scheduler-activations upcall machinery.
 * Called from pthread_create() when the first extra thread is made.
 */
static void
pthread__start(void)
{
	pthread_t self, idle;
	int i, ret;

	self = pthread__self(); /* should be the "main()" thread */

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread__alarm_init();

	pthread_atfork(NULL, NULL, pthread__child_callback);

	/* Create idle threads */
	for (i = 0; i < NIDLETHREADS; i++) {
		ret = pthread__stackalloc(&idle);
		if (ret != 0)
			err(1, "Couldn't allocate stack for idle thread!");
		pthread__initthread(self, idle);
		sigfillset(&idle->pt_sigmask);
		idle->pt_type = PT_THREAD_IDLE;
		PTQ_INSERT_HEAD(&pthread__allqueue, idle, pt_allq);
		pthread__sched_idle(self, idle);
	}

	/* Start up the SA subsystem */
	pthread__sa_start();
	SDPRINTF(("(pthread__start %p) Started.\n", self));
}


/*
 * General-purpose thread data structure sanitization.
 * Resets every field of 't' to a known-clean state and assigns it a
 * fresh serial number; used both for brand-new and recycled threads.
 */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	/* Serial numbers are never reused, even when stacks are. */
	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	pthread_lockinit(&t->pt_flaglock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_trapuc = NULL;
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	/* Clear thread-specific-data slots (recycled threads keep none). */
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
	t->pt_name = NULL;
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}


int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t self, newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * name;
	int ret;

	PTHREADD_ADD(PTHREADD_CREATE);

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;

	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;

	self = pthread__self();

	/* 1. Recycle a dead thread's structure and stack, if available. */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		newthread = PTQ_FIRST(&pthread__deadqueue);
		PTQ_REMOVE(&pthread__deadqueue, newthread, pt_allq);
		pthread_spinunlock(self, &pthread__deadqueue_lock);
	} else {
		pthread_spinunlock(self, &pthread__deadqueue_lock);
		/* Set up a stack and allocate space for a pthread_st. */
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}
	}

	/* 2. Set up state. */
	pthread__initthread(self, newthread);
	newthread->pt_flags = nattr.pta_flags;
	newthread->pt_sigmask = self->pt_sigmask;

	/* 3. Set up misc. attributes. */
	newthread->pt_name = name;

	/*
	 * 4. Set up context.
	 *
	 * The pt_uc pointer points to a location safely below the
	 * stack start; this is arranged by pthread__stackalloc().
	 */
	_INITCONTEXT_U(newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
	pthread__uc_id(newthread->pt_uc) = newthread;
#endif
	newthread->pt_uc->uc_stack = newthread->pt_stack;
	newthread->pt_uc->uc_link = NULL;
	makecontext(newthread->pt_uc, pthread__create_tramp, 2,
	    startfunc, arg);

	/* 5. Add to list of all threads. */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_INSERT_HEAD(&pthread__allqueue, newthread, pt_allq);
	nthreads++;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	SDPRINTF(("(pthread_create %p) Created new thread %p (name pointer %p).\n", self, newthread, newthread->pt_name));

	/* 6. Put on appropriate queue. */
	if (newthread->pt_flags & PT_FLAG_SUSPENDED) {
		pthread_spinlock(self, &newthread->pt_statelock);
		pthread__suspend(self, newthread);
		pthread_spinunlock(self, &newthread->pt_statelock);
	} else
		pthread__sched(self, newthread);

	*thread = newthread;

	return 0;
}


/*
 * Entry point for new threads, installed via makecontext(); runs the
 * user's start function and turns its return value into pthread_exit().
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	void *retval;

	retval = start(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}

/*
 * Suspend another thread (non-portable extension).  Pulls the target
 * off whatever queue it is on before parking it on the suspend queue.
 */
int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self = pthread__self();
	if (self == thread) {
		fprintf(stderr, "suspend_np: can't suspend self\n");
		return EDEADLK;
	}
	SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	if (thread->pt_blockgen != thread->pt_unblockgen) {
		/* Blocked in the kernel: mark it for suspension on return. */
		/* XXX flaglock? */
		thread->pt_flags |= PT_FLAG_SUSPENDED;
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	}
	switch (thread->pt_state) {
	case PT_STATE_RUNNING:
		pthread__abort();	/* XXX */
		break;
	case PT_STATE_SUSPENDED:
		pthread_spinunlock(self, &thread->pt_statelock);
		return 0;
	case PT_STATE_RUNNABLE:
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__runqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		break;
	case PT_STATE_BLOCKED_QUEUE:
		pthread_spinlock(self, thread->pt_sleeplock);
		PTQ_REMOVE(thread->pt_sleepq, thread, pt_sleep);
		pthread_spinunlock(self, thread->pt_sleeplock);
		break;
	default:
		break;			/* XXX */
	}
	pthread__suspend(self, thread);
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}

/*
 * Resume a thread previously suspended with pthread_suspend_np()
 * (non-portable extension).  Clearing PT_FLAG_SUSPENDED also cancels a
 * pending suspension of a kernel-blocked thread.
 */
int
pthread_resume_np(pthread_t thread)
{

	pthread_t self = pthread__self();
	SDPRINTF(("(pthread_resume_np %p) Resume thread %p (state %d).\n",
		     self, thread, thread->pt_state));
	pthread_spinlock(self, &thread->pt_statelock);
	/* XXX flaglock? */
	thread->pt_flags &= ~PT_FLAG_SUSPENDED;
	if (thread->pt_state == PT_STATE_SUSPENDED) {
		/* NOTE: suspqueue is covered by the runqueue lock. */
		pthread_spinlock(self, &pthread__runqueue_lock);
		PTQ_REMOVE(&pthread__suspqueue, thread, pt_runq);
		pthread_spinunlock(self, &pthread__runqueue_lock);
		pthread__sched(self, thread);
	}
	pthread_spinunlock(self, &thread->pt_statelock);
	return 0;
}


/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();
	SDPRINTF(("(pthread__idle %p).\n", self));

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_TAIL(&pthread__reidlequeue, self, pt_runq);
	/* Don't need a flag lock; nothing else has a handle on this thread */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	SDPRINTF(("(pthread__idle %p) yielding.\n", self));
	sa_yield();

	/* NOTREACHED */
	self->pt_spinlocks++; /* XXX make sure we get to finish the assert! */
	SDPRINTF(("(pthread__idle %p) Returned! Error.\n", self));
	pthread__abort();
}


/*
 * Terminate the calling thread: run cancellation cleanup handlers and
 * TSD destructors, record the exit value, then either self-recycle
 * (detached) or become a zombie and wake any joiners.  Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;
	int nt, flags;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting (status %p, flags %x, cancel %d).\n", self, retval, self->pt_flags, self->pt_cancel));

	/* Disable cancellability. */
	pthread_spinlock(self, &self->pt_flaglock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	flags = self->pt_flags;
	self->pt_cancel = 0;
	pthread_spinunlock(self, &self->pt_flaglock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	if (flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us, so free the name ourselves. */
		name = self->pt_name;
		self->pt_name = NULL;

		if (name != NULL)
			free(name);

		pthread_spinlock(self, &pthread__allqueue_lock);
		PTQ_REMOVE(&pthread__allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &pthread__allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, self, pt_allq);
		pthread__block(self, &pthread__deadqueue_lock);
	} else {
		/* Note: name will be freed by the joiner. */
		pthread_spinlock(self, &self->pt_join_lock);
		pthread_spinlock(self, &pthread__allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &pthread__allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/*
		 * Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		pthread__sched_sleepers(self, &self->pt_joiners);
		pthread__block(self, &self->pt_join_lock);
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}


/*
 * Wait for 'thread' to terminate and collect its exit value.  The
 * pt_num snapshot detects the target being recycled out from under us
 * while we slept on its joiner queue.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;
	int num;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_flaglock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	num = thread->pt_num;
	pthread_spinlock(self, &thread->pt_join_lock);
	while (thread->pt_state != PT_STATE_ZOMBIE) {
		if ((thread->pt_state == PT_STATE_DEAD) ||
		    (thread->pt_flags & PT_FLAG_DETACHED) ||
		    (thread->pt_num != num)) {
			/*
			 * Another thread beat us to the join, or called
			 * pthread_detach(). If num didn't match, the
			 * thread died and was recycled before we got
			 * another chance to run.
			 */
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_spinunlock(self, &thread->pt_flaglock);
			return ESRCH;
		}
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		/* Park ourselves on the target's joiner queue. */
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	name = thread->pt_name;
	thread->pt_name = NULL;
	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_REMOVE(&pthread__allqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__allqueue_lock);

	pthread_spinlock(self, &pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &pthread__deadqueue_lock);

	if (name != NULL)
		free(name);

	return 0;
}


int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}


/*
 * Mark a thread detached so its resources are reclaimed automatically
 * at exit; any current joiners are woken and will return ESRCH.
 */
int
pthread_detach(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_spinlock(self, &thread->pt_flaglock);
	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		pthread_spinunlock(self, &thread->pt_flaglock);
		return EINVAL;
	}

	thread->pt_flags |= PT_FLAG_DETACHED;

	/* Any joiners have to be punted now. */
	pthread__sched_sleepers(self, &thread->pt_joiners);

	pthread_spinunlock(self, &thread->pt_join_lock);
	pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}


/*
 * Copy a thread's name into the caller's buffer (non-portable
 * extension).  An unnamed thread yields the empty string.
 */
int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	/* pt_name is protected by the join lock. */
	pthread_spinlock(self, &thread->pt_join_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_spinunlock(self, &thread->pt_join_lock);

	return 0;
}


/*
 * Set a thread's name (non-portable extension).  'name' is a printf
 * format string and 'arg' its single optional argument; the formatted
 * result must fit in PTHREAD_MAX_NAMELEN_NP.
 */
int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	pthread_t self = pthread_self();
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_state == PT_STATE_DEAD) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		free(cp);
		return EINVAL;
	}

	oldname = thread->pt_name;
	thread->pt_name = cp;

	pthread_spinunlock(self, &thread->pt_join_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}



/*
 * XXX There should be a way for applications to use the efficent
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}


/*
 * Request cancellation of a thread.  If cancellation is enabled on the
 * target, wake it from kernel or userland blocking so it notices.
 */
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;

	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE))
		return ESRCH;

	self = pthread__self();

	pthread_spinlock(self, &thread->pt_flaglock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinunlock(self, &thread->pt_flaglock);
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_blockgen != thread->pt_unblockgen) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up; it will
			 * check for the cancellation if the routine
			 * is a cancellation point, and loop and reblock
			 * otherwise.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	} else
		pthread_spinunlock(self, &thread->pt_flaglock);

	return 0;
}


/*
 * Enable or disable cancellation for the calling thread; a request
 * that arrived while disabled is delivered upon re-enabling.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);
	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			/* Remember the request for later re-enabling. */
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_spinunlock(self, &self->pt_flaglock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


/*
 * Select deferred vs. asynchronous cancellation for the calling
 * thread; switching to asynchronous acts on any pending request now.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_spinlock(self, &self->pt_flaglock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_flaglock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_spinunlock(self, &self->pt_flaglock);
	return retval;
}


/* Deferred cancellation point: exit now if cancellation is pending. */
void
pthread_testcancel()
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666. This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t self, pthread_t id)
{
	pthread_t target;

	pthread_spinlock(self, &pthread__allqueue_lock);
	PTQ_FOREACH(target, &pthread__allqueue, pt_allq)
		if (target == id)
			break;
	pthread_spinunlock(self, &pthread__allqueue_lock);

	if (target == NULL)
		return ESRCH;

	return 0;
}


/* Internal cancellation check, used by library cancellation points. */
void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}


/*
 * Push a cleanup handler; 'store' is caller-provided storage for the
 * stack entry (the pthread_cleanup_push() macro supplies it).
 */
void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}


/* Pop the most recent cleanup handler, running it if 'ex' is nonzero. */
void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}


/* Per-thread errno location, used by libc's __errno() indirection. */
int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

/* Raw write(2), bypassing any libpthread syscall wrapping. */
ssize_t	_sys_write(int, const void *, size_t);

/*
 * Backend for the library's internal assertions: report the failure
 * on stderr and abort.  Never returns.
 */
void
pthread__assertfunc(char *file, int line, char *function, char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);

	_exit(1);
}


/*
 * Report detected application misuse of the pthread API; behavior
 * (stderr, syslog, abort) is selected by pthread__diagassert, which
 * the PTHREAD_DIAGASSERT environment variable can override.
 */
void
pthread__errorfunc(char *file, int line, char *function, char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}