/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "namespace.h"
#include <machine/tls.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#ifdef _PTHREADS_DEBUGGING

#include <stdio.h>
#include <stdarg.h>
#include <sys/file.h>

#endif

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {			\
	(m)->m_qe.tqe_prev = NULL;				\
	(m)->m_qe.tqe_next = NULL;				\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {			\
	if ((m)->m_qe.tqe_prev == NULL)				\
		PANIC("mutex is not on list");			\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {			\
	if (((m)->m_qe.tqe_prev != NULL) ||			\
	    ((m)->m_qe.tqe_next != NULL))			\
		PANIC("mutex is on list");			\
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {			\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0,	\
	    "thread in syncq when it shouldn't be.");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m)	do {		\
	__free(m);				\
} while (0)

umtx_t	_mutex_static_lock;

#ifdef _PTHREADS_DEBUGGING

static
void
mutex_log(const char *ctl, ...)
{
	char buf[256];
	va_list va;
	size_t len;

	va_start(va, ctl);
	len = vsnprintf(buf, sizeof(buf), ctl, va);
	va_end(va);
	_thr_log(buf, len);
}

#else

static __inline
void
mutex_log(const char *ctl __unused, ...)
{
}

#endif
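/*
 * A minimal sketch (illustrative only, not compiled) of why the
 * assertion macros above end in "while (0)" with no trailing
 * semicolon: the caller's semicolon completes the statement, so the
 * macro composes with if/else.  A stray semicolon inside the macro
 * breaks that, as the hypothetical BROKEN() below shows.
 */
#if 0
#define BROKEN(m)	do { touch(m); } while (0);	/* extra ';' */

	if (cond)
		BROKEN(m);	/* expands to the loop plus an empty ';' */
	else			/* ...so this 'else' has no 'if' to bind to */
		untouch(m);
#endif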
#ifdef _PTHREADS_DEBUGGING2

static void
mutex_log2(struct pthread *curthread, struct pthread_mutex *m, int op)
{
	if (curthread) {
		if (curthread->tid < 32)
			m->m_lastop[curthread->tid] =
				(__sys_getpid() << 16) | op;
	} else {
		m->m_lastop[0] =
			(__sys_getpid() << 16) | op;
	}
}

#else

static __inline
void
mutex_log2(struct pthread *curthread __unused,
    struct pthread_mutex *m __unused, int op __unused)
{
}

#endif

/*
 * Prototypes
 */
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

int	__pthread_mutex_init(pthread_mutex_t *mutex,
	    const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
	    const struct timespec *abs_timeout);

static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{
	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}

static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr, int private)
{
	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	mutex_log2(tls_get_curthread(), pmutex, 32);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
}

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		error = mutex_check_attr(attr);
		if (error != 0)
			return (error);
	}

	pmutex = __malloc(sizeof(struct pthread_mutex));
	if (pmutex == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr, private);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
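/*
 * Usage sketch (illustrative, not compiled): the init_static*()
 * helpers above back the lazy initialization of statically
 * initialized mutexes.  PTHREAD_MUTEX_INITIALIZER leaves the
 * pthread_mutex_t pointer NULL here, which is exactly what the
 * "*m == NULL" tests in the lock, trylock, and timedlock entry
 * points check before calling them.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_critical_section(void)
{
	/* The first lock call performs the dynamic initialization. */
	pthread_mutex_lock(&example_lock);
	/* ... critical section ... */
	pthread_mutex_unlock(&example_lock);
}
#endif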
int
_pthread_mutex_init(pthread_mutex_t * __restrict mutex,
    const pthread_mutexattr_t * __restrict mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

#if 0
int
_mutex_reinit(pthread_mutex_t *mutexp)
{
	pthread_mutex_t mutex = *mutexp;

	_thr_umtx_init(&mutex->m_lock);
	TAILQ_INIT(&mutex->m_queue);
	MUTEX_INIT_LINK(mutex);
	mutex_log2(tls_get_curthread(), mutex, 33);
	mutex->m_owner = NULL;
	mutex->m_count = 0;
	mutex->m_refcount = 0;
	mutex->m_prio = 0;
	mutex->m_saved_prio = 0;

	return (0);
}
#endif

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL) {
		ret = EINVAL;
	} else if (*mutex == NULL) {
		ret = 0;
	} else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If the trylock fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is
		 * still in use, mostly for priority mutex types or
		 * condition variables referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	mutex_log("mutex_trylock_common %p\n", m);
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		mutex_log2(curthread, m, 1);
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		mutex_log2(curthread, m, 2);
		ret = mutex_self_trylock(m);
	} /* else {} */
	mutex_log("mutex_trylock_common %p (returns %d)\n", m, ret);

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret = 0;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}
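/*
 * Usage sketch (illustrative, not compiled): _pthread_mutex_destroy()
 * above refuses to destroy a mutex that is locked, has waiters
 * queued, or is referenced by a condition variable.  Assuming the
 * underlying trylock reports EBUSY for a held lock, a caller sees:
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
example_destroy_semantics(pthread_mutex_t *mp)
{
	if (pthread_mutex_trylock(mp) == 0) {
		/* Destroying a mutex we still own fails. */
		assert(pthread_mutex_destroy(mp) == EBUSY);
		pthread_mutex_unlock(mp);
		/* Now it succeeds and *mp is reset to NULL. */
		assert(pthread_mutex_destroy(mp) == 0);
	}
}
#endif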
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	int ret = 0;

	m = *mutex;
	mutex_log("mutex_lock_common %p\n", m);
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		mutex_log2(curthread, m, 3);
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
			abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			timespecsub(abstime, &ts, &ts2);
			ret = THR_UMTX_TIMEDLOCK(curthread, &m->m_lock, &ts2);
		}
		if (ret == 0) {
			mutex_log2(curthread, m, 4);
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		}
	}
	mutex_log("mutex_lock_common %p (returns %d) lock %d,%d\n",
	    m, ret, m->m_lock, m->m_count);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t * __restrict m,
    const struct timespec * __restrict abs_timeout)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}
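/*
 * Usage sketch (illustrative, not compiled): mutex_lock_common()
 * treats abstime as an absolute CLOCK_REALTIME deadline and converts
 * it to a relative timeout via timespecsub(), so a caller constructs
 * the argument from the current time:
 */
#if 0
#include <pthread.h>
#include <time.h>

static int
example_lock_with_deadline(pthread_mutex_t *mp)
{
	struct timespec deadline;

	/* Give up if the lock cannot be acquired within two seconds. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;

	/* Returns 0 on success, ETIMEDOUT if the deadline passes. */
	return (pthread_mutex_timedlock(mp, &deadline));
}
#endif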
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	if (__predict_false(m == NULL))
		return (EINVAL);
	return (mutex_unlock_common(m));
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			timespecsub(abstime, &ts1, &ts2);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex: intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			timespecsub(abstime, &ts1, &ts2);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
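/*
 * Usage sketch (illustrative, not compiled): the self-lock handling
 * above is where the mutex types diverge.  ERRORCHECK fails a
 * recursive attempt with EDEADLK, NORMAL deadlocks by design, and
 * RECURSIVE counts the nesting:
 */
#if 0
#include <pthread.h>

static void
example_recursive(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);
	pthread_mutex_lock(&m);		/* nested: m_count goes to 1 */
	pthread_mutex_unlock(&m);	/* m_count back to 0, still owned */
	pthread_mutex_unlock(&m);	/* final unlock releases the umtx */
	pthread_mutex_destroy(&m);
}
#endif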
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL)) {
		mutex_log2(curthread, m, 252);
		return (EINVAL);
	}
	mutex_log("mutex_unlock_common %p\n", m);
	if (__predict_false(m->m_owner != curthread)) {
		mutex_log("mutex_unlock_common %p (failedA)\n", m);
		mutex_log2(curthread, m, 253);
		return (EPERM);
	}

	if (__predict_false(m->m_type == PTHREAD_MUTEX_RECURSIVE &&
			    m->m_count > 0)) {
		m->m_count--;
		mutex_log("mutex_unlock_common %p (returns 0, partial)\n", m);
		mutex_log2(curthread, m, 254);
	} else {
		/*
		 * Clear the count in case this is a recursive mutex.
		 */
		m->m_count = 0;
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		mutex_log2(tls_get_curthread(), m, 35);
		MUTEX_INIT_LINK(m);
		mutex_log2(tls_get_curthread(), m, 36);
		/*
		 * Hand off the mutex to the next waiting thread.
		 */
		mutex_log("mutex_unlock_common %p (returns 0) lock %d\n",
		    m, m->m_lock);
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
		mutex_log2(tls_get_curthread(), m, 37);
		mutex_log2(curthread, m, 255);
	}
	return (0);
}

int
_pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
    int * __restrict prioceiling)
{
	if ((mutex == NULL) || (*mutex == NULL))
		return (EINVAL);
	if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	*prioceiling = (*mutex)->m_prio;
	return (0);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
    int prioceiling, int * __restrict old_ceiling)
{
	int ret = 0;
	int tmp;

	if ((mutex == NULL) || (*mutex == NULL))
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
		tmp = (*mutex)->m_prio;
		(*mutex)->m_prio = prioceiling;
		ret = _pthread_mutex_unlock(mutex);
		*old_ceiling = tmp;
	}
	return (ret);
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	if ((ret = _pthread_mutex_lock(m)) == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false(mutex == NULL))
		return (EINVAL);
	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	*count = m->m_count;
	m->m_count = 0;
	m->m_refcount++;
	mutex_log2(tls_get_curthread(), m, 45);
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	mutex_log2(curthread, m, 250);
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
__strong_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__strong_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);
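/*
 * Usage sketch (illustrative, not compiled): the prioceiling
 * functions above only accept PTHREAD_PRIO_PROTECT mutexes; m_prio
 * carries the ceiling configured through the mutex attributes.
 */
#if 0
#include <pthread.h>

static int
example_prioceiling(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int old;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, 10);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	/* Raise the ceiling; the previous value lands in 'old'. */
	pthread_mutex_setprioceiling(&m, 20, &old);
	pthread_mutex_destroy(&m);
	return (old);
}
#endif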