1 /* $NetBSD: pthread.c,v 1.145 2014/12/16 20:05:54 pooka Exp $ */ 2 3 /*- 4 * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Nathan J. Williams and Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __RCSID("$NetBSD: pthread.c,v 1.145 2014/12/16 20:05:54 pooka Exp $"); 34 35 #define __EXPOSE_STACK 1 36 37 #include <sys/param.h> 38 #include <sys/exec_elf.h> 39 #include <sys/mman.h> 40 #include <sys/lwp.h> 41 #include <sys/lwpctl.h> 42 #include <sys/resource.h> 43 #include <sys/tls.h> 44 45 #include <assert.h> 46 #include <dlfcn.h> 47 #include <err.h> 48 #include <errno.h> 49 #include <lwp.h> 50 #include <signal.h> 51 #include <stdio.h> 52 #include <stdlib.h> 53 #include <stddef.h> 54 #include <string.h> 55 #include <syslog.h> 56 #include <ucontext.h> 57 #include <unistd.h> 58 #include <sched.h> 59 60 #include "pthread.h" 61 #include "pthread_int.h" 62 #include "pthread_makelwp.h" 63 #include "reentrant.h" 64 65 pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER; 66 static rb_tree_t pthread__alltree; 67 68 static signed int pthread__cmp(void *, const void *, const void *); 69 70 static const rb_tree_ops_t pthread__alltree_ops = { 71 .rbto_compare_nodes = pthread__cmp, 72 .rbto_compare_key = pthread__cmp, 73 .rbto_node_offset = offsetof(struct __pthread_st, pt_alltree), 74 .rbto_context = NULL 75 }; 76 77 static void pthread__create_tramp(void *); 78 static void pthread__initthread(pthread_t); 79 static void pthread__scrubthread(pthread_t, char *, int); 80 static void pthread__initmain(pthread_t *); 81 static void pthread__fork_callback(void); 82 static void pthread__reap(pthread_t); 83 static void pthread__child_callback(void); 84 static void pthread__start(void); 85 86 void pthread__init(void); 87 88 int pthread__started; 89 int __uselibcstub = 1; 90 pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER; 91 pthread_queue_t pthread__deadqueue; 92 pthread_queue_t pthread__allqueue; 93 94 static pthread_attr_t pthread_default_attr; 95 static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE }; 96 97 enum { 98 DIAGASSERT_ABORT = 1<<0, 99 DIAGASSERT_STDERR = 1<<1, 100 DIAGASSERT_SYSLOG = 1<<2 101 }; 102 103 static int pthread__diagassert; 104 105 int pthread__concurrency; 106 int pthread__nspins; 107 int pthread__unpark_max = PTHREAD__UNPARK_MAX; 108 int pthread__dbg; /* set by libpthread_dbg if active */ 109 110 /* 111 * We have to initialize the pthread_stack* variables here because 112 * mutexes are used before pthread_init() and thus pthread__initmain() 113 * are called. Since mutexes only save the stack pointer and not a 114 * pointer to the thread data, it is safe to change the mapping from 115 * stack pointer to thread data afterwards. 116 */ 117 size_t pthread__stacksize; 118 size_t pthread__pagesize; 119 static struct __pthread_st pthread__main; 120 121 int _sys___sigprocmask14(int, const sigset_t *, sigset_t *); 122 123 __strong_alias(__libc_thr_self,pthread_self) 124 __strong_alias(__libc_thr_create,pthread_create) 125 __strong_alias(__libc_thr_exit,pthread_exit) 126 __strong_alias(__libc_thr_errno,pthread__errno) 127 __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate) 128 __strong_alias(__libc_thr_equal,pthread_equal) 129 __strong_alias(__libc_thr_init,pthread__init) 130 131 /* 132 * Static library kludge. Place a reference to a symbol any library 133 * file which does not already have a reference here. 134 */ 135 extern int pthread__cancel_stub_binder; 136 137 void *pthread__static_lib_binder[] = { 138 &pthread__cancel_stub_binder, 139 pthread_cond_init, 140 pthread_mutex_init, 141 pthread_rwlock_init, 142 pthread_barrier_init, 143 pthread_key_create, 144 pthread_setspecific, 145 }; 146 147 #define NHASHLOCK 64 148 149 static union hashlock { 150 pthread_mutex_t mutex; 151 char pad[64]; 152 } hashlocks[NHASHLOCK] __aligned(64); 153 154 /* 155 * This needs to be started by the library loading code, before main() 156 * gets to run, for various things that use the state of the initial thread 157 * to work properly (thread-specific data is an application-visible example; 158 * spinlock counts for mutexes is an internal example). 159 */ 160 void 161 pthread__init(void) 162 { 163 pthread_t first; 164 char *p; 165 int i; 166 extern int __isthreaded; 167 168 __uselibcstub = 0; 169 170 pthread__pagesize = (size_t)sysconf(_SC_PAGESIZE); 171 pthread__concurrency = (int)sysconf(_SC_NPROCESSORS_CONF); 172 173 /* Initialize locks first; they're needed elsewhere. */ 174 pthread__lockprim_init(); 175 for (i = 0; i < NHASHLOCK; i++) { 176 pthread_mutex_init(&hashlocks[i].mutex, NULL); 177 } 178 179 /* Fetch parameters. */ 180 i = (int)_lwp_unpark_all(NULL, 0, NULL); 181 if (i == -1) 182 err(1, "_lwp_unpark_all"); 183 if (i < pthread__unpark_max) 184 pthread__unpark_max = i; 185 186 /* Basic data structure setup */ 187 pthread_attr_init(&pthread_default_attr); 188 PTQ_INIT(&pthread__allqueue); 189 PTQ_INIT(&pthread__deadqueue); 190 191 rb_tree_init(&pthread__alltree, &pthread__alltree_ops); 192 193 /* Create the thread structure corresponding to main() */ 194 pthread__initmain(&first); 195 pthread__initthread(first); 196 pthread__scrubthread(first, NULL, 0); 197 198 first->pt_lid = _lwp_self(); 199 PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq); 200 (void)rb_tree_insert_node(&pthread__alltree, first); 201 202 if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) { 203 err(1, "_lwp_ctl"); 204 } 205 206 /* Start subsystems */ 207 PTHREAD_MD_INIT 208 209 for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) { 210 switch (*p) { 211 case 'a': 212 pthread__diagassert |= DIAGASSERT_ABORT; 213 break; 214 case 'A': 215 pthread__diagassert &= ~DIAGASSERT_ABORT; 216 break; 217 case 'e': 218 pthread__diagassert |= DIAGASSERT_STDERR; 219 break; 220 case 'E': 221 pthread__diagassert &= ~DIAGASSERT_STDERR; 222 break; 223 case 'l': 224 pthread__diagassert |= DIAGASSERT_SYSLOG; 225 break; 226 case 'L': 227 pthread__diagassert &= ~DIAGASSERT_SYSLOG; 228 break; 229 } 230 } 231 232 /* Tell libc that we're here and it should role-play accordingly. */ 233 pthread_atfork(NULL, NULL, pthread__fork_callback); 234 __isthreaded = 1; 235 } 236 237 static void 238 pthread__fork_callback(void) 239 { 240 struct __pthread_st *self = pthread__self(); 241 242 /* lwpctl state is not copied across fork. */ 243 if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) { 244 err(1, "_lwp_ctl"); 245 } 246 self->pt_lid = _lwp_self(); 247 } 248 249 static void 250 pthread__child_callback(void) 251 { 252 253 /* 254 * Clean up data structures that a forked child process might 255 * trip over. Note that if threads have been created (causing 256 * this handler to be registered) the standards say that the 257 * child will trigger undefined behavior if it makes any 258 * pthread_* calls (or any other calls that aren't 259 * async-signal-safe), so we don't really have to clean up 260 * much. Anything that permits some pthread_* calls to work is 261 * merely being polite. 262 */ 263 pthread__started = 0; 264 } 265 266 static void 267 pthread__start(void) 268 { 269 270 /* 271 * Per-process timers are cleared by fork(); despite the 272 * various restrictions on fork() and threads, it's legal to 273 * fork() before creating any threads. 274 */ 275 pthread_atfork(NULL, NULL, pthread__child_callback); 276 } 277 278 279 /* General-purpose thread data structure sanitization. */ 280 /* ARGSUSED */ 281 static void 282 pthread__initthread(pthread_t t) 283 { 284 285 t->pt_self = t; 286 t->pt_magic = PT_MAGIC; 287 t->pt_willpark = 0; 288 t->pt_unpark = 0; 289 t->pt_nwaiters = 0; 290 t->pt_sleepobj = NULL; 291 t->pt_signalled = 0; 292 t->pt_havespecific = 0; 293 t->pt_early = NULL; 294 t->pt_lwpctl = &pthread__dummy_lwpctl; 295 t->pt_blocking = 0; 296 t->pt_droplock = NULL; 297 298 memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops)); 299 pthread_mutex_init(&t->pt_lock, NULL); 300 PTQ_INIT(&t->pt_cleanup_stack); 301 pthread_cond_init(&t->pt_joiners, NULL); 302 memset(&t->pt_specific, 0, sizeof(t->pt_specific)); 303 } 304 305 static void 306 pthread__scrubthread(pthread_t t, char *name, int flags) 307 { 308 309 t->pt_state = PT_STATE_RUNNING; 310 t->pt_exitval = NULL; 311 t->pt_flags = flags; 312 t->pt_cancel = 0; 313 t->pt_errno = 0; 314 t->pt_name = name; 315 t->pt_lid = 0; 316 } 317 318 static int 319 pthread__getstack(pthread_t newthread, const pthread_attr_t *attr) 320 { 321 void *stackbase, *stackbase2, *redzone; 322 size_t stacksize, guardsize; 323 bool allocated; 324 325 if (attr != NULL) { 326 pthread_attr_getstack(attr, &stackbase, &stacksize); 327 } else { 328 stackbase = NULL; 329 stacksize = 0; 330 } 331 if (stacksize == 0) 332 stacksize = pthread__stacksize; 333 334 if (newthread->pt_stack_allocated) { 335 if (stackbase == NULL && 336 newthread->pt_stack.ss_size == stacksize) 337 return 0; 338 stackbase2 = newthread->pt_stack.ss_sp; 339 #ifndef __MACHINE_STACK_GROWS_UP 340 stackbase2 = (char *)stackbase2 - newthread->pt_guardsize; 341 #endif 342 munmap(stackbase2, 343 newthread->pt_stack.ss_size + newthread->pt_guardsize); 344 newthread->pt_stack.ss_sp = NULL; 345 newthread->pt_stack.ss_size = 0; 346 newthread->pt_guardsize = 0; 347 newthread->pt_stack_allocated = false; 348 } 349 350 newthread->pt_stack_allocated = false; 351 352 if (stackbase == NULL) { 353 stacksize = ((stacksize - 1) | (pthread__pagesize - 1)) + 1; 354 guardsize = pthread__pagesize; 355 stackbase = mmap(NULL, stacksize + guardsize, 356 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, (off_t)0); 357 if (stackbase == MAP_FAILED) 358 return ENOMEM; 359 allocated = true; 360 } else { 361 guardsize = 0; 362 allocated = false; 363 } 364 #ifdef __MACHINE_STACK_GROWS_UP 365 redzone = (char *)stackbase + stacksize; 366 stackbase2 = (char *)stackbase; 367 #else 368 redzone = (char *)stackbase; 369 stackbase2 = (char *)stackbase + guardsize; 370 #endif 371 if (allocated && guardsize && 372 mprotect(redzone, guardsize, PROT_NONE) == -1) { 373 munmap(stackbase, stacksize + guardsize); 374 return EPERM; 375 } 376 newthread->pt_stack.ss_size = stacksize; 377 newthread->pt_stack.ss_sp = stackbase2; 378 newthread->pt_guardsize = guardsize; 379 newthread->pt_stack_allocated = allocated; 380 return 0; 381 } 382 383 int 384 pthread_create(pthread_t *thread, const pthread_attr_t *attr, 385 void *(*startfunc)(void *), void *arg) 386 { 387 pthread_t newthread; 388 pthread_attr_t nattr; 389 struct pthread_attr_private *p; 390 char * volatile name; 391 unsigned long flag; 392 void *private_area; 393 int ret; 394 395 if (__predict_false(__uselibcstub)) { 396 pthread__errorfunc(__FILE__, __LINE__, __func__, 397 "pthread_create() requires linking with -lpthread"); 398 return __libc_thr_create_stub(thread, attr, startfunc, arg); 399 } 400 401 /* 402 * It's okay to check this without a lock because there can 403 * only be one thread before it becomes true. 404 */ 405 if (pthread__started == 0) { 406 pthread__start(); 407 pthread__started = 1; 408 } 409 410 if (attr == NULL) 411 nattr = pthread_default_attr; 412 else if (attr->pta_magic == PT_ATTR_MAGIC) 413 nattr = *attr; 414 else 415 return EINVAL; 416 417 /* Fetch misc. attributes from the attr structure. */ 418 name = NULL; 419 if ((p = nattr.pta_private) != NULL) 420 if (p->ptap_name[0] != '\0') 421 if ((name = strdup(p->ptap_name)) == NULL) 422 return ENOMEM; 423 424 newthread = NULL; 425 426 /* 427 * Try to reclaim a dead thread. 428 */ 429 if (!PTQ_EMPTY(&pthread__deadqueue)) { 430 pthread_mutex_lock(&pthread__deadqueue_lock); 431 PTQ_FOREACH(newthread, &pthread__deadqueue, pt_deadq) { 432 /* Still running? */ 433 if (newthread->pt_lwpctl->lc_curcpu == 434 LWPCTL_CPU_EXITED || 435 (_lwp_kill(newthread->pt_lid, 0) == -1 && 436 errno == ESRCH)) 437 break; 438 } 439 if (newthread) 440 PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq); 441 pthread_mutex_unlock(&pthread__deadqueue_lock); 442 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) 443 if (newthread && newthread->pt_tls) { 444 _rtld_tls_free(newthread->pt_tls); 445 newthread->pt_tls = NULL; 446 } 447 #endif 448 } 449 450 /* 451 * If necessary set up a stack, allocate space for a pthread_st, 452 * and initialize it. 453 */ 454 if (newthread == NULL) { 455 newthread = malloc(sizeof(*newthread)); 456 if (newthread == NULL) { 457 free(name); 458 return ENOMEM; 459 } 460 newthread->pt_stack_allocated = false; 461 462 if (pthread__getstack(newthread, attr)) { 463 free(newthread); 464 free(name); 465 return ENOMEM; 466 } 467 468 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) 469 newthread->pt_tls = NULL; 470 #endif 471 472 /* Add to list of all threads. */ 473 pthread_rwlock_wrlock(&pthread__alltree_lock); 474 PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq); 475 (void)rb_tree_insert_node(&pthread__alltree, newthread); 476 pthread_rwlock_unlock(&pthread__alltree_lock); 477 478 /* Will be reset by the thread upon exit. */ 479 pthread__initthread(newthread); 480 } else { 481 if (pthread__getstack(newthread, attr)) { 482 pthread_mutex_lock(&pthread__deadqueue_lock); 483 PTQ_INSERT_TAIL(&pthread__deadqueue, newthread, pt_deadq); 484 pthread_mutex_unlock(&pthread__deadqueue_lock); 485 return ENOMEM; 486 } 487 } 488 489 /* 490 * Create the new LWP. 491 */ 492 pthread__scrubthread(newthread, name, nattr.pta_flags); 493 newthread->pt_func = startfunc; 494 newthread->pt_arg = arg; 495 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II) 496 private_area = newthread->pt_tls = _rtld_tls_allocate(); 497 newthread->pt_tls->tcb_pthread = newthread; 498 #else 499 private_area = newthread; 500 #endif 501 502 flag = LWP_DETACHED; 503 if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0 || 504 (nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) 505 flag |= LWP_SUSPENDED; 506 507 ret = pthread__makelwp(pthread__create_tramp, newthread, private_area, 508 newthread->pt_stack.ss_sp, newthread->pt_stack.ss_size, 509 flag, &newthread->pt_lid); 510 if (ret != 0) { 511 ret = errno; 512 pthread_mutex_lock(&newthread->pt_lock); 513 /* Will unlock and free name. */ 514 pthread__reap(newthread); 515 return ret; 516 } 517 518 if ((nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) { 519 if (p != NULL) { 520 (void)pthread_setschedparam(newthread, p->ptap_policy, 521 &p->ptap_sp); 522 } 523 if ((newthread->pt_flags & PT_FLAG_SUSPENDED) == 0) { 524 (void)_lwp_continue(newthread->pt_lid); 525 } 526 } 527 528 *thread = newthread; 529 530 return 0; 531 } 532 533 534 __dead static void 535 pthread__create_tramp(void *cookie) 536 { 537 pthread_t self; 538 void *retval; 539 540 self = cookie; 541 542 /* 543 * Throw away some stack in a feeble attempt to reduce cache 544 * thrash. May help for SMT processors. XXX We should not 545 * be allocating stacks on fixed 2MB boundaries. Needs a 546 * thread register or decent thread local storage. 547 * 548 * Note that we may race with the kernel in _lwp_create(), 549 * and so pt_lid can be unset at this point, but we don't 550 * care. 551 */ 552 (void)alloca(((unsigned)self->pt_lid & 7) << 8); 553 554 if (self->pt_name != NULL) { 555 pthread_mutex_lock(&self->pt_lock); 556 if (self->pt_name != NULL) 557 (void)_lwp_setname(0, self->pt_name); 558 pthread_mutex_unlock(&self->pt_lock); 559 } 560 561 if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) { 562 err(1, "_lwp_ctl"); 563 } 564 565 retval = (*self->pt_func)(self->pt_arg); 566 567 pthread_exit(retval); 568 569 /*NOTREACHED*/ 570 pthread__abort(); 571 } 572 573 int 574 pthread_suspend_np(pthread_t thread) 575 { 576 pthread_t self; 577 578 self = pthread__self(); 579 if (self == thread) { 580 return EDEADLK; 581 } 582 if (pthread__find(thread) != 0) 583 return ESRCH; 584 if (_lwp_suspend(thread->pt_lid) == 0) 585 return 0; 586 return errno; 587 } 588 589 int 590 pthread_resume_np(pthread_t thread) 591 { 592 593 if (pthread__find(thread) != 0) 594 return ESRCH; 595 if (_lwp_continue(thread->pt_lid) == 0) 596 return 0; 597 return errno; 598 } 599 600 void 601 pthread_exit(void *retval) 602 { 603 pthread_t self; 604 struct pt_clean_t *cleanup; 605 char *name; 606 607 if (__predict_false(__uselibcstub)) { 608 __libc_thr_exit_stub(retval); 609 goto out; 610 } 611 612 self = pthread__self(); 613 614 /* Disable cancellability. */ 615 pthread_mutex_lock(&self->pt_lock); 616 self->pt_flags |= PT_FLAG_CS_DISABLED; 617 self->pt_cancel = 0; 618 619 /* Call any cancellation cleanup handlers */ 620 if (!PTQ_EMPTY(&self->pt_cleanup_stack)) { 621 pthread_mutex_unlock(&self->pt_lock); 622 while (!PTQ_EMPTY(&self->pt_cleanup_stack)) { 623 cleanup = PTQ_FIRST(&self->pt_cleanup_stack); 624 PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next); 625 (*cleanup->ptc_cleanup)(cleanup->ptc_arg); 626 } 627 pthread_mutex_lock(&self->pt_lock); 628 } 629 630 /* Perform cleanup of thread-specific data */ 631 pthread__destroy_tsd(self); 632 633 /* Signal our exit. */ 634 self->pt_exitval = retval; 635 if (self->pt_flags & PT_FLAG_DETACHED) { 636 self->pt_state = PT_STATE_DEAD; 637 name = self->pt_name; 638 self->pt_name = NULL; 639 pthread_mutex_unlock(&self->pt_lock); 640 if (name != NULL) 641 free(name); 642 pthread_mutex_lock(&pthread__deadqueue_lock); 643 PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_deadq); 644 pthread_mutex_unlock(&pthread__deadqueue_lock); 645 _lwp_exit(); 646 } else { 647 self->pt_state = PT_STATE_ZOMBIE; 648 pthread_cond_broadcast(&self->pt_joiners); 649 pthread_mutex_unlock(&self->pt_lock); 650 /* Note: name will be freed by the joiner. */ 651 _lwp_exit(); 652 } 653 654 out: 655 /*NOTREACHED*/ 656 pthread__abort(); 657 exit(1); 658 } 659 660 661 int 662 pthread_join(pthread_t thread, void **valptr) 663 { 664 pthread_t self; 665 int error; 666 667 self = pthread__self(); 668 669 if (pthread__find(thread) != 0) 670 return ESRCH; 671 672 if (thread->pt_magic != PT_MAGIC) 673 return EINVAL; 674 675 if (thread == self) 676 return EDEADLK; 677 678 self->pt_droplock = &thread->pt_lock; 679 pthread_mutex_lock(&thread->pt_lock); 680 for (;;) { 681 if (thread->pt_state == PT_STATE_ZOMBIE) 682 break; 683 if (thread->pt_state == PT_STATE_DEAD) { 684 pthread_mutex_unlock(&thread->pt_lock); 685 self->pt_droplock = NULL; 686 return ESRCH; 687 } 688 if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) { 689 pthread_mutex_unlock(&thread->pt_lock); 690 self->pt_droplock = NULL; 691 return EINVAL; 692 } 693 error = pthread_cond_wait(&thread->pt_joiners, 694 &thread->pt_lock); 695 if (error != 0) { 696 pthread__errorfunc(__FILE__, __LINE__, 697 __func__, "unexpected return from cond_wait()"); 698 } 699 700 } 701 pthread__testcancel(self); 702 if (valptr != NULL) 703 *valptr = thread->pt_exitval; 704 /* pthread__reap() will drop the lock. */ 705 pthread__reap(thread); 706 self->pt_droplock = NULL; 707 708 return 0; 709 } 710 711 static void 712 pthread__reap(pthread_t thread) 713 { 714 char *name; 715 716 name = thread->pt_name; 717 thread->pt_name = NULL; 718 thread->pt_state = PT_STATE_DEAD; 719 pthread_mutex_unlock(&thread->pt_lock); 720 721 pthread_mutex_lock(&pthread__deadqueue_lock); 722 PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq); 723 pthread_mutex_unlock(&pthread__deadqueue_lock); 724 725 if (name != NULL) 726 free(name); 727 } 728 729 int 730 pthread_equal(pthread_t t1, pthread_t t2) 731 { 732 if (__predict_false(__uselibcstub)) 733 return __libc_thr_equal_stub(t1, t2); 734 735 /* Nothing special here. */ 736 return (t1 == t2); 737 } 738 739 740 int 741 pthread_detach(pthread_t thread) 742 { 743 744 if (pthread__find(thread) != 0) 745 return ESRCH; 746 747 if (thread->pt_magic != PT_MAGIC) 748 return EINVAL; 749 750 pthread_mutex_lock(&thread->pt_lock); 751 thread->pt_flags |= PT_FLAG_DETACHED; 752 if (thread->pt_state == PT_STATE_ZOMBIE) { 753 /* pthread__reap() will drop the lock. */ 754 pthread__reap(thread); 755 } else { 756 /* 757 * Not valid for threads to be waiting in 758 * pthread_join() (there are intractable 759 * sync issues from the application 760 * perspective), but give those threads 761 * a chance anyway. 762 */ 763 pthread_cond_broadcast(&thread->pt_joiners); 764 pthread_mutex_unlock(&thread->pt_lock); 765 } 766 767 return 0; 768 } 769 770 771 int 772 pthread_getname_np(pthread_t thread, char *name, size_t len) 773 { 774 775 if (pthread__find(thread) != 0) 776 return ESRCH; 777 778 if (thread->pt_magic != PT_MAGIC) 779 return EINVAL; 780 781 pthread_mutex_lock(&thread->pt_lock); 782 if (thread->pt_name == NULL) 783 name[0] = '\0'; 784 else 785 strlcpy(name, thread->pt_name, len); 786 pthread_mutex_unlock(&thread->pt_lock); 787 788 return 0; 789 } 790 791 792 int 793 pthread_setname_np(pthread_t thread, const char *name, void *arg) 794 { 795 char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP]; 796 int namelen; 797 798 if (pthread__find(thread) != 0) 799 return ESRCH; 800 801 if (thread->pt_magic != PT_MAGIC) 802 return EINVAL; 803 804 namelen = snprintf(newname, sizeof(newname), name, arg); 805 if (namelen >= PTHREAD_MAX_NAMELEN_NP) 806 return EINVAL; 807 808 cp = strdup(newname); 809 if (cp == NULL) 810 return ENOMEM; 811 812 pthread_mutex_lock(&thread->pt_lock); 813 oldname = thread->pt_name; 814 thread->pt_name = cp; 815 (void)_lwp_setname(thread->pt_lid, cp); 816 pthread_mutex_unlock(&thread->pt_lock); 817 818 if (oldname != NULL) 819 free(oldname); 820 821 return 0; 822 } 823 824 825 826 /* 827 * XXX There should be a way for applications to use the efficent 828 * inline version, but there are opacity/namespace issues. 829 */ 830 pthread_t 831 pthread_self(void) 832 { 833 if (__predict_false(__uselibcstub)) 834 return (pthread_t)__libc_thr_self_stub(); 835 836 return pthread__self(); 837 } 838 839 840 int 841 pthread_cancel(pthread_t thread) 842 { 843 844 if (pthread__find(thread) != 0) 845 return ESRCH; 846 pthread_mutex_lock(&thread->pt_lock); 847 thread->pt_flags |= PT_FLAG_CS_PENDING; 848 if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) { 849 thread->pt_cancel = 1; 850 pthread_mutex_unlock(&thread->pt_lock); 851 _lwp_wakeup(thread->pt_lid); 852 } else 853 pthread_mutex_unlock(&thread->pt_lock); 854 855 return 0; 856 } 857 858 859 int 860 pthread_setcancelstate(int state, int *oldstate) 861 { 862 pthread_t self; 863 int retval; 864 865 if (__predict_false(__uselibcstub)) 866 return __libc_thr_setcancelstate_stub(state, oldstate); 867 868 self = pthread__self(); 869 retval = 0; 870 871 pthread_mutex_lock(&self->pt_lock); 872 873 if (oldstate != NULL) { 874 if (self->pt_flags & PT_FLAG_CS_DISABLED) 875 *oldstate = PTHREAD_CANCEL_DISABLE; 876 else 877 *oldstate = PTHREAD_CANCEL_ENABLE; 878 } 879 880 if (state == PTHREAD_CANCEL_DISABLE) { 881 self->pt_flags |= PT_FLAG_CS_DISABLED; 882 if (self->pt_cancel) { 883 self->pt_flags |= PT_FLAG_CS_PENDING; 884 self->pt_cancel = 0; 885 } 886 } else if (state == PTHREAD_CANCEL_ENABLE) { 887 self->pt_flags &= ~PT_FLAG_CS_DISABLED; 888 /* 889 * If a cancellation was requested while cancellation 890 * was disabled, note that fact for future 891 * cancellation tests. 892 */ 893 if (self->pt_flags & PT_FLAG_CS_PENDING) { 894 self->pt_cancel = 1; 895 /* This is not a deferred cancellation point. */ 896 if (self->pt_flags & PT_FLAG_CS_ASYNC) { 897 pthread_mutex_unlock(&self->pt_lock); 898 pthread__cancelled(); 899 } 900 } 901 } else 902 retval = EINVAL; 903 904 pthread_mutex_unlock(&self->pt_lock); 905 906 return retval; 907 } 908 909 910 int 911 pthread_setcanceltype(int type, int *oldtype) 912 { 913 pthread_t self; 914 int retval; 915 916 self = pthread__self(); 917 retval = 0; 918 919 pthread_mutex_lock(&self->pt_lock); 920 921 if (oldtype != NULL) { 922 if (self->pt_flags & PT_FLAG_CS_ASYNC) 923 *oldtype = PTHREAD_CANCEL_ASYNCHRONOUS; 924 else 925 *oldtype = PTHREAD_CANCEL_DEFERRED; 926 } 927 928 if (type == PTHREAD_CANCEL_ASYNCHRONOUS) { 929 self->pt_flags |= PT_FLAG_CS_ASYNC; 930 if (self->pt_cancel) { 931 pthread_mutex_unlock(&self->pt_lock); 932 pthread__cancelled(); 933 } 934 } else if (type == PTHREAD_CANCEL_DEFERRED) 935 self->pt_flags &= ~PT_FLAG_CS_ASYNC; 936 else 937 retval = EINVAL; 938 939 pthread_mutex_unlock(&self->pt_lock); 940 941 return retval; 942 } 943 944 945 void 946 pthread_testcancel(void) 947 { 948 pthread_t self; 949 950 self = pthread__self(); 951 if (self->pt_cancel) 952 pthread__cancelled(); 953 } 954 955 956 /* 957 * POSIX requires that certain functions return an error rather than 958 * invoking undefined behavior even when handed completely bogus 959 * pthread_t values, e.g. stack garbage. 960 */ 961 int 962 pthread__find(pthread_t id) 963 { 964 pthread_t target; 965 int error; 966 967 pthread_rwlock_rdlock(&pthread__alltree_lock); 968 target = rb_tree_find_node(&pthread__alltree, id); 969 error = (target && target->pt_state != PT_STATE_DEAD) ? 0 : ESRCH; 970 pthread_rwlock_unlock(&pthread__alltree_lock); 971 972 return error; 973 } 974 975 976 void 977 pthread__testcancel(pthread_t self) 978 { 979 980 if (self->pt_cancel) 981 pthread__cancelled(); 982 } 983 984 985 void 986 pthread__cancelled(void) 987 { 988 pthread_mutex_t *droplock; 989 pthread_t self; 990 991 self = pthread__self(); 992 droplock = self->pt_droplock; 993 self->pt_droplock = NULL; 994 995 if (droplock != NULL && pthread_mutex_held_np(droplock)) 996 pthread_mutex_unlock(droplock); 997 998 pthread_exit(PTHREAD_CANCELED); 999 } 1000 1001 1002 void 1003 pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store) 1004 { 1005 pthread_t self; 1006 struct pt_clean_t *entry; 1007 1008 self = pthread__self(); 1009 entry = store; 1010 entry->ptc_cleanup = cleanup; 1011 entry->ptc_arg = arg; 1012 PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next); 1013 } 1014 1015 1016 void 1017 pthread__cleanup_pop(int ex, void *store) 1018 { 1019 pthread_t self; 1020 struct pt_clean_t *entry; 1021 1022 self = pthread__self(); 1023 entry = store; 1024 1025 PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next); 1026 if (ex) 1027 (*entry->ptc_cleanup)(entry->ptc_arg); 1028 } 1029 1030 1031 int * 1032 pthread__errno(void) 1033 { 1034 pthread_t self; 1035 1036 if (__predict_false(__uselibcstub)) { 1037 pthread__errorfunc(__FILE__, __LINE__, __func__, 1038 "pthread__errno() requires linking with -lpthread"); 1039 return __libc_thr_errno_stub(); 1040 } 1041 1042 self = pthread__self(); 1043 1044 return &(self->pt_errno); 1045 } 1046 1047 ssize_t _sys_write(int, const void *, size_t); 1048 1049 void 1050 pthread__assertfunc(const char *file, int line, const char *function, 1051 const char *expr) 1052 { 1053 char buf[1024]; 1054 int len; 1055 1056 /* 1057 * snprintf should not acquire any locks, or we could 1058 * end up deadlocked if the assert caller held locks. 1059 */ 1060 len = snprintf(buf, 1024, 1061 "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n", 1062 expr, file, line, 1063 function ? ", function \"" : "", 1064 function ? function : "", 1065 function ? "\"" : ""); 1066 1067 _sys_write(STDERR_FILENO, buf, (size_t)len); 1068 (void)kill(getpid(), SIGABRT); 1069 1070 _exit(1); 1071 } 1072 1073 1074 void 1075 pthread__errorfunc(const char *file, int line, const char *function, 1076 const char *msg) 1077 { 1078 char buf[1024]; 1079 size_t len; 1080 1081 if (pthread__diagassert == 0) 1082 return; 1083 1084 /* 1085 * snprintf should not acquire any locks, or we could 1086 * end up deadlocked if the assert caller held locks. 1087 */ 1088 len = snprintf(buf, 1024, 1089 "%s: Error detected by libpthread: %s.\n" 1090 "Detected by file \"%s\", line %d%s%s%s.\n" 1091 "See pthread(3) for information.\n", 1092 getprogname(), msg, file, line, 1093 function ? ", function \"" : "", 1094 function ? function : "", 1095 function ? "\"" : ""); 1096 1097 if (pthread__diagassert & DIAGASSERT_STDERR) 1098 _sys_write(STDERR_FILENO, buf, len); 1099 1100 if (pthread__diagassert & DIAGASSERT_SYSLOG) 1101 syslog(LOG_DEBUG | LOG_USER, "%s", buf); 1102 1103 if (pthread__diagassert & DIAGASSERT_ABORT) { 1104 (void)kill(getpid(), SIGABRT); 1105 _exit(1); 1106 } 1107 } 1108 1109 /* 1110 * Thread park/unpark operations. The kernel operations are 1111 * modelled after a brief description from "Multithreading in 1112 * the Solaris Operating Environment": 1113 * 1114 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf 1115 */ 1116 1117 #define OOPS(msg) \ 1118 pthread__errorfunc(__FILE__, __LINE__, __func__, msg) 1119 1120 int 1121 pthread__park(pthread_t self, pthread_mutex_t *lock, 1122 pthread_queue_t *queue, const struct timespec *abstime, 1123 int cancelpt, const void *hint) 1124 { 1125 int rv, error; 1126 void *obj; 1127 1128 /* 1129 * For non-interlocked release of mutexes we need a store 1130 * barrier before incrementing pt_blocking away from zero. 1131 * This is provided by pthread_mutex_unlock(). 1132 */ 1133 self->pt_willpark = 1; 1134 pthread_mutex_unlock(lock); 1135 self->pt_willpark = 0; 1136 self->pt_blocking++; 1137 1138 /* 1139 * Wait until we are awoken by a pending unpark operation, 1140 * a signal, an unpark posted after we have gone asleep, 1141 * or an expired timeout. 1142 * 1143 * It is fine to test the value of pt_sleepobj without 1144 * holding any locks, because: 1145 * 1146 * o Only the blocking thread (this thread) ever sets them 1147 * to a non-NULL value. 1148 * 1149 * o Other threads may set them NULL, but if they do so they 1150 * must also make this thread return from _lwp_park. 1151 * 1152 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system 1153 * calls and all make use of spinlocks in the kernel. So 1154 * these system calls act as full memory barriers, and will 1155 * ensure that the calling CPU's store buffers are drained. 1156 * In combination with the spinlock release before unpark, 1157 * this means that modification of pt_sleepobj/onq by another 1158 * thread will become globally visible before that thread 1159 * schedules an unpark operation on this thread. 1160 * 1161 * Note: the test in the while() statement dodges the park op if 1162 * we have already been awoken, unless there is another thread to 1163 * awaken. This saves a syscall - if we were already awakened, 1164 * the next call to _lwp_park() would need to return early in order 1165 * to eat the previous wakeup. 1166 */ 1167 rv = 0; 1168 do { 1169 /* 1170 * If we deferred unparking a thread, arrange to 1171 * have _lwp_park() restart it before blocking. 1172 */ 1173 error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME, abstime, 1174 self->pt_unpark, hint, hint); 1175 self->pt_unpark = 0; 1176 if (error != 0) { 1177 switch (rv = errno) { 1178 case EINTR: 1179 case EALREADY: 1180 rv = 0; 1181 break; 1182 case ETIMEDOUT: 1183 break; 1184 default: 1185 OOPS("_lwp_park failed"); 1186 break; 1187 } 1188 } 1189 /* Check for cancellation. */ 1190 if (cancelpt && self->pt_cancel) 1191 rv = EINTR; 1192 } while (self->pt_sleepobj != NULL && rv == 0); 1193 1194 /* 1195 * If we have been awoken early but are still on the queue, 1196 * then remove ourself. Again, it's safe to do the test 1197 * without holding any locks. 1198 */ 1199 if (__predict_false(self->pt_sleepobj != NULL)) { 1200 pthread_mutex_lock(lock); 1201 if ((obj = self->pt_sleepobj) != NULL) { 1202 PTQ_REMOVE(queue, self, pt_sleep); 1203 self->pt_sleepobj = NULL; 1204 if (obj != NULL && self->pt_early != NULL) 1205 (*self->pt_early)(obj); 1206 } 1207 pthread_mutex_unlock(lock); 1208 } 1209 self->pt_early = NULL; 1210 self->pt_blocking--; 1211 membar_sync(); 1212 1213 return rv; 1214 } 1215 1216 void 1217 pthread__unpark(pthread_queue_t *queue, pthread_t self, 1218 pthread_mutex_t *interlock) 1219 { 1220 pthread_t target; 1221 u_int max; 1222 size_t nwaiters; 1223 1224 max = pthread__unpark_max; 1225 nwaiters = self->pt_nwaiters; 1226 target = PTQ_FIRST(queue); 1227 if (nwaiters == max) { 1228 /* Overflow. */ 1229 (void)_lwp_unpark_all(self->pt_waiters, nwaiters, 1230 __UNVOLATILE(&interlock->ptm_waiters)); 1231 nwaiters = 0; 1232 } 1233 target->pt_sleepobj = NULL; 1234 self->pt_waiters[nwaiters++] = target->pt_lid; 1235 PTQ_REMOVE(queue, target, pt_sleep); 1236 self->pt_nwaiters = nwaiters; 1237 pthread__mutex_deferwake(self, interlock); 1238 } 1239 1240 void 1241 pthread__unpark_all(pthread_queue_t *queue, pthread_t self, 1242 pthread_mutex_t *interlock) 1243 { 1244 pthread_t target; 1245 u_int max; 1246 size_t nwaiters; 1247 1248 max = pthread__unpark_max; 1249 nwaiters = self->pt_nwaiters; 1250 PTQ_FOREACH(target, queue, pt_sleep) { 1251 if (nwaiters == max) { 1252 /* Overflow. */ 1253 (void)_lwp_unpark_all(self->pt_waiters, nwaiters, 1254 __UNVOLATILE(&interlock->ptm_waiters)); 1255 nwaiters = 0; 1256 } 1257 target->pt_sleepobj = NULL; 1258 self->pt_waiters[nwaiters++] = target->pt_lid; 1259 } 1260 self->pt_nwaiters = nwaiters; 1261 PTQ_INIT(queue); 1262 pthread__mutex_deferwake(self, interlock); 1263 } 1264 1265 #undef OOPS 1266 1267 static void 1268 pthread__initmainstack(void) 1269 { 1270 struct rlimit slimit; 1271 const AuxInfo *aux; 1272 size_t size; 1273 1274 _DIAGASSERT(_dlauxinfo() != NULL); 1275 1276 if (getrlimit(RLIMIT_STACK, &slimit) == -1) 1277 err(1, "Couldn't get stack resource consumption limits"); 1278 size = slimit.rlim_cur; 1279 pthread__main.pt_stack.ss_size = size; 1280 1281 for (aux = _dlauxinfo(); aux->a_type != AT_NULL; ++aux) { 1282 if (aux->a_type == AT_STACKBASE) { 1283 pthread__main.pt_stack.ss_sp = (void *)aux->a_v; 1284 #ifdef __MACHINE_STACK_GROWS_UP 1285 pthread__main.pt_stack.ss_sp = (void *)aux->a_v; 1286 #else 1287 pthread__main.pt_stack.ss_sp = (char *)aux->a_v - size; 1288 #endif 1289 break; 1290 } 1291 } 1292 } 1293 1294 /* 1295 * Set up the slightly special stack for the "initial" thread, which 1296 * runs on the normal system stack, and thus gets slightly different 1297 * treatment. 1298 */ 1299 static void 1300 pthread__initmain(pthread_t *newt) 1301 { 1302 char *value; 1303 1304 pthread__initmainstack(); 1305 1306 value = pthread__getenv("PTHREAD_STACKSIZE"); 1307 if (value != NULL) { 1308 pthread__stacksize = atoi(value) * 1024; 1309 if (pthread__stacksize > pthread__main.pt_stack.ss_size) 1310 pthread__stacksize = pthread__main.pt_stack.ss_size; 1311 } 1312 if (pthread__stacksize == 0) 1313 pthread__stacksize = pthread__main.pt_stack.ss_size; 1314 pthread__stacksize += pthread__pagesize - 1; 1315 pthread__stacksize &= ~(pthread__pagesize - 1); 1316 if (pthread__stacksize < 4 * pthread__pagesize) 1317 errx(1, "Stacksize limit is too low, minimum %zd kbyte.", 1318 4 * pthread__pagesize / 1024); 1319 1320 *newt = &pthread__main; 1321 #if defined(_PTHREAD_GETTCB_EXT) 1322 pthread__main.pt_tls = _PTHREAD_GETTCB_EXT(); 1323 #elif defined(__HAVE___LWP_GETTCB_FAST) 1324 pthread__main.pt_tls = __lwp_gettcb_fast(); 1325 #else 1326 pthread__main.pt_tls = _lwp_getprivate(); 1327 #endif 1328 pthread__main.pt_tls->tcb_pthread = &pthread__main; 1329 } 1330 1331 static signed int 1332 /*ARGSUSED*/ 1333 pthread__cmp(void *ctx, const void *n1, const void *n2) 1334 { 1335 const uintptr_t p1 = (const uintptr_t)n1; 1336 const uintptr_t p2 = (const uintptr_t)n2; 1337 1338 if (p1 < p2) 1339 return -1; 1340 if (p1 > p2) 1341 return 1; 1342 return 0; 1343 } 1344 1345 /* Because getenv() wants to use locks. */ 1346 char * 1347 pthread__getenv(const char *name) 1348 { 1349 extern char **environ; 1350 size_t l_name, offset; 1351 1352 l_name = strlen(name); 1353 for (offset = 0; environ[offset] != NULL; offset++) { 1354 if (strncmp(name, environ[offset], l_name) == 0 && 1355 environ[offset][l_name] == '=') { 1356 return environ[offset] + l_name + 1; 1357 } 1358 } 1359 1360 return NULL; 1361 } 1362 1363 pthread_mutex_t * 1364 pthread__hashlock(volatile const void *p) 1365 { 1366 uintptr_t v; 1367 1368 v = (uintptr_t)p; 1369 return &hashlocks[((v >> 9) ^ (v >> 3)) & (NHASHLOCK - 1)].mutex; 1370 } 1371 1372 int 1373 pthread__checkpri(int pri) 1374 { 1375 static int havepri; 1376 static long min, max; 1377 1378 if (!havepri) { 1379 min = sysconf(_SC_SCHED_PRI_MIN); 1380 max = sysconf(_SC_SCHED_PRI_MAX); 1381 havepri = 1; 1382 } 1383 return (pri < min || pri > max) ? EINVAL : 0; 1384 } 1385