/*	$NetBSD: pthread_mutex.c,v 1.63 2016/10/31 23:53:12 christos Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.63 2016/10/31 23:53:12 christos Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>
#include <sys/sched.h>
#include <sys/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <stdio.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	MUTEX_WAITERS_BIT		((uintptr_t)0x01)
#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_DEFERRED_BIT		((uintptr_t)0x04)
#define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
#define	MUTEX_THREAD			((uintptr_t)~0x0f)

#define	MUTEX_HAS_WAITERS(x)		((uintptr_t)(x) & MUTEX_WAITERS_BIT)
#define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_PROTECT(x)		((uintptr_t)(x) & MUTEX_PROTECT_BIT)
#define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)

#define	MUTEX_GET_TYPE(x)		\
    ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
#define	MUTEX_SET_TYPE(x, t)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
#define	MUTEX_GET_PROTOCOL(x)		\
    ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
#define	MUTEX_SET_PROTOCOL(x, p)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
#define	MUTEX_GET_CEILING(x)		\
    ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
#define	MUTEX_SET_CEILING(x, c)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static void	pthread__mutex_wakeup(pthread_t, pthread_mutex_t *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *,
    const struct timespec *);
static int	pthread__mutex_unlock_slow(pthread_mutex_t *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)
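/*
 * Illustration (not compiled): how the single ptm_owner word packs a
 * thread pointer together with the low flag bits, assuming pthread_t
 * values are at least 16-byte aligned so the low four bits are free.
 * A minimal sketch only; the function name is hypothetical and the
 * asserts would need <assert.h>.
 */
#if 0
static void
example_owner_word(pthread_t self)
{
	void *owner;

	/* Owned by 'self', recursive, with waiters queued. */
	owner = (void *)((uintptr_t)self | MUTEX_RECURSIVE_BIT |
	    MUTEX_WAITERS_BIT);

	assert(MUTEX_OWNER(owner) == (uintptr_t)self);
	assert(MUTEX_RECURSIVE(owner));
	assert(MUTEX_HAS_WAITERS(owner));
}
#endif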
int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	uintptr_t type, proto, val, ceil;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_init_stub(ptm, attr);

	if (attr == NULL) {
		type = PTHREAD_MUTEX_NORMAL;
		proto = PTHREAD_PRIO_NONE;
		ceil = 0;
	} else {
		val = (uintptr_t)attr->ptma_private;

		type = MUTEX_GET_TYPE(val);
		proto = MUTEX_GET_PROTOCOL(val);
		ceil = MUTEX_GET_CEILING(val);
	}
	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}
	switch (proto) {
	case PTHREAD_PRIO_PROTECT:
		val = (uintptr_t)ptm->ptm_owner;
		val |= MUTEX_PROTECT_BIT;
		ptm->ptm_owner = (void *)val;
		break;

	}
	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;
	ptm->ptm_ceiling = (unsigned char)ceil;

	return 0;
}

int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutex_destroy_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_lock_stub(ptm);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, NULL);
}

int
pthread_mutex_timedlock(pthread_mutex_t *ptm, const struct timespec *ts)
{
	pthread_t self;
	void *val;

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, ts);
}

/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.  pt_blocking is set by libpthread in order
 * to cut out system call and kernel spinlock overhead on remote CPUs
 * (could represent many thousands of clock cycles).  pt_blocking also
 * makes this thread yield if the target is calling sched_yield().
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
		    thread->pt_blocking)
			break;
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}
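/*
 * Illustration (not compiled): the backoff in pthread__mutex_spin()
 * doubles the pause count each round up to a cap, so a contended
 * thread backs off quickly without hammering the lock's cache line.
 * A minimal sketch; still_held() and spin_wait() are hypothetical
 * stand-ins for the owner check and pthread__mutex_pause().
 */
#if 0
static void
example_backoff(int (*still_held)(void), void (*spin_wait)(void))
{
	unsigned int count, i;

	for (count = 2; still_held();) {
		if (count < 128)	/* cap the pauses per round */
			count += count;	/* exponential: 2, 4, 8, ... */
		for (i = count; i != 0; i--)
			spin_wait();
	}
}
#endif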
NOINLINE static void
pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
{
	void *new, *owner;

	/*
	 * Note that the mutex can become unlocked before we set
	 * the waiters bit.  If that happens it's not safe to sleep
	 * as we may never be awoken: we must remove the current
	 * thread from the waiters list and try again.
	 *
	 * Because we are doing this atomically, we can't remove
	 * one waiter: we must remove all waiters and awaken them,
	 * then sleep in _lwp_park() until we have been awoken.
	 *
	 * Issue a memory barrier to ensure that we are reading
	 * the value of ptm_owner/pt_mutexwait after we have entered
	 * the waiters list (the CAS itself must be atomic).
	 */
again:
	membar_consumer();
	owner = ptm->ptm_owner;

	if (MUTEX_OWNER(owner) == 0) {
		pthread__mutex_wakeup(self, ptm);
		return;
	}
	if (!MUTEX_HAS_WAITERS(owner)) {
		new = (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT);
		if (atomic_cas_ptr(&ptm->ptm_owner, owner, new) != owner) {
			goto again;
		}
	}

	/*
	 * Note that pthread_mutex_unlock() can do a non-interlocked CAS.
	 * We cannot know if the presence of the waiters bit is stable
	 * while the holding thread is running.  There are many assumptions;
	 * see sys/kern/kern_mutex.c for details.  In short, we must spin if
	 * we see that the holder is running again.
	 */
	membar_sync();
	if (MUTEX_OWNER(owner) != (uintptr_t)self)
		pthread__mutex_spin(ptm, owner);

	if (membar_consumer(), !MUTEX_HAS_WAITERS(ptm->ptm_owner)) {
		goto again;
	}
}
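/*
 * Illustration (not compiled): the essential pattern of
 * pthread__mutex_setwaiters() in isolation.  After enqueueing, the
 * waiter must set the waiters bit with a CAS and re-verify that the
 * lock is still held; otherwise the unlocker may already have skipped
 * the wakeup and the waiter could sleep forever.  A sketch only;
 * 'lock_word' and the function name are hypothetical.
 */
#if 0
static void
example_setwaiters(volatile void **lock_word)
{
	void *owner, *new;

	for (;;) {
		owner = *lock_word;
		if (MUTEX_OWNER(owner) == 0)
			break;	/* became free: wake everyone, retry lock */
		new = (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT);
		if (atomic_cas_ptr(lock_word, owner, new) == owner)
			break;	/* bit set while the lock was still held */
		/* owner changed under us: re-examine it */
	}
}
#endif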
NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
{
	void *waiters, *new, *owner, *next;
	pthread_t self;
	int serrno;
	int error;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	owner = ptm->ptm_owner;
	self = pthread__self();

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	/* priority protect */
	if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
		return errno;
	}
	serrno = errno;
	for (;; owner = ptm->ptm_owner) {
		/* Spin while the owner is running. */
		if (MUTEX_OWNER(owner) != (uintptr_t)self)
			owner = pthread__mutex_spin(ptm, owner);

		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			do {
				new = (void *)
				    ((uintptr_t)self | (uintptr_t)owner);
				next = atomic_cas_ptr(&ptm->ptm_owner, owner,
				    new);
				if (next == owner) {
					errno = serrno;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
					membar_enter();
#endif
					return 0;
				}
				owner = next;
			} while (MUTEX_OWNER(owner) == 0);
			/*
			 * We have lost the race to acquire the mutex.
			 * The new owner could be running on another
			 * CPU, in which case we should spin and avoid
			 * the overhead of blocking.
			 */
			continue;
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure mutexwait/mutexnext
		 * are visible before we enter the waiters list.
		 */
		self->pt_mutexwait = 1;
		for (waiters = ptm->ptm_waiters;; waiters = next) {
			self->pt_mutexnext = waiters;
			membar_producer();
			next = atomic_cas_ptr(&ptm->ptm_waiters, waiters, self);
			if (next == waiters)
				break;
		}

		/* Set the waiters bit and block. */
		pthread__mutex_setwaiters(self, ptm);

		/*
		 * We may have been awoken by the current thread above,
		 * or will be awoken by the current holder of the mutex.
		 * The key requirement is that we must not proceed until
		 * told that we are no longer waiting (via pt_mutexwait
		 * being set to zero).  Otherwise it is unsafe to re-enter
		 * the thread onto the waiters list.
		 */
		while (self->pt_mutexwait) {
			self->pt_blocking++;
			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME, ts,
			    self->pt_unpark, __UNVOLATILE(&ptm->ptm_waiters),
			    __UNVOLATILE(&ptm->ptm_waiters));
			self->pt_unpark = 0;
			self->pt_blocking--;
			membar_sync();
			if (__predict_true(error != -1)) {
				continue;
			}
			if (errno == ETIMEDOUT && self->pt_mutexwait) {
				/* Remove self from waiters list */
				pthread__mutex_wakeup(self, ptm);
				/* priority protect */
				if (MUTEX_PROTECT(owner))
					(void)_sched_protect(-1);
				return ETIMEDOUT;
			}
		}
	}
}

int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_trylock_stub(ptm);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *value;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_unlock_stub(ptm);

	/*
	 * Note this may be a non-interlocked CAS.  See lock_slow()
	 * above and sys/kern/kern_mutex.c for details.
	 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	self = pthread__self();
	value = atomic_cas_ptr_ni(&ptm->ptm_owner, self, NULL);
	if (__predict_true(value == self)) {
		pthread__smt_wake();
		return 0;
	}
	return pthread__mutex_unlock_slow(ptm);
}
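/*
 * Usage illustration (not compiled): pthread_mutex_timedlock() takes
 * an absolute CLOCK_REALTIME deadline, which lock_slow() passes to
 * _lwp_park() with TIMER_ABSTIME.  A minimal sketch; the function
 * name is hypothetical and error handling beyond ETIMEDOUT is elided.
 */
#if 0
static int
example_timedlock(pthread_mutex_t *mtx)
{
	struct timespec ts;

	(void)clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 5;			/* give up after five seconds */

	switch (pthread_mutex_timedlock(mtx, &ts)) {
	case 0:
		/* ... critical section ... */
		pthread_mutex_unlock(mtx);
		return 0;
	case ETIMEDOUT:
		return -1;		/* deadline passed while waiting */
	default:
		return -1;
	}
}
#endif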
NOINLINE static int
pthread__mutex_unlock_slow(pthread_mutex_t *ptm)
{
	pthread_t self, owner, new;
	int weown, error, deferred;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	owner = ptm->ptm_owner;
	weown = (MUTEX_OWNER(owner) == (uintptr_t)self);
	deferred = (int)((uintptr_t)owner & MUTEX_DEFERRED_BIT);
	error = 0;

	if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else {
			new = NULL;
		}
	} else if (MUTEX_RECURSIVE(owner)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else if (ptm->ptm_recursed) {
			ptm->ptm_recursed--;
			new = owner;
		} else {
			new = (pthread_t)MUTEX_RECURSIVE_BIT;
		}
	} else {
		pthread__error(EPERM,
		    "Unlocking unlocked mutex", (owner != NULL));
		pthread__error(EPERM,
		    "Unlocking mutex owned by another thread", weown);
		new = NULL;
	}

	/*
	 * Release the mutex.  If there appear to be waiters, then
	 * wake them up.
	 */
	if (new != owner) {
		owner = atomic_swap_ptr(&ptm->ptm_owner, new);
		if (__predict_false(MUTEX_PROTECT(owner))) {
			/* restore elevated priority */
			(void)_sched_protect(-1);
		}
		if (MUTEX_HAS_WAITERS(owner) != 0) {
			pthread__mutex_wakeup(self, ptm);
			return 0;
		}
	}

	/*
	 * There were no waiters, but we may have deferred waking
	 * other threads until mutex unlock - we must wake them now.
	 */
	if (!deferred)
		return error;

	if (self->pt_nwaiters == 1) {
		/*
		 * If the calling thread is about to block, defer
		 * unparking the target until _lwp_park() is called.
		 */
		if (self->pt_willpark && self->pt_unpark == 0) {
			self->pt_unpark = self->pt_waiters[0];
		} else {
			(void)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
		}
	} else {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
	}
	self->pt_nwaiters = 0;

	return error;
}
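/*
 * Usage illustration (not compiled): the errorcheck behaviour
 * implemented by lock_slow() and unlock_slow() above - relocking
 * returns EDEADLK and unlocking a mutex you do not own returns EPERM.
 * A minimal sketch; the function name is hypothetical, asserts would
 * need <assert.h>, and error handling is elided.
 */
#if 0
static void
example_errorcheck(void)
{
	pthread_mutexattr_t ma;
	pthread_mutex_t mtx;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&mtx, &ma);
	pthread_mutexattr_destroy(&ma);

	pthread_mutex_lock(&mtx);
	assert(pthread_mutex_lock(&mtx) == EDEADLK);	/* relock detected */
	pthread_mutex_unlock(&mtx);
	assert(pthread_mutex_unlock(&mtx) == EPERM);	/* not owned by us */
}
#endif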
/*
 * pthread__mutex_wakeup: unpark threads waiting for us
 *
 * unpark threads on the ptm->ptm_waiters list and self->pt_waiters.
 */

static void
pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
{
	pthread_t thread, next;
	ssize_t n, rv;

	/*
	 * Take ownership of the current set of waiters.  No
	 * need for a memory barrier following this, all loads
	 * are dependent upon 'thread'.
	 */
	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
	pthread__smt_wake();

	for (;;) {
		/*
		 * Pull waiters from the queue and add to our list.
		 * Use a memory barrier to ensure that we safely
		 * read the value of pt_mutexnext before 'thread'
		 * sees pt_mutexwait being cleared.
		 */
		for (n = self->pt_nwaiters, self->pt_nwaiters = 0;
		    n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			next = thread->pt_mutexnext;
			if (thread != self) {
				self->pt_waiters[n++] = thread->pt_lid;
				membar_sync();
			}
			thread->pt_mutexwait = 0;
			/* No longer safe to touch 'thread' */
		}

		switch (n) {
		case 0:
			return;
		case 1:
			/*
			 * If the calling thread is about to block,
			 * defer unparking the target until _lwp_park()
			 * is called.
			 */
			if (self->pt_willpark && self->pt_unpark == 0) {
				self->pt_unpark = self->pt_waiters[0];
				return;
			}
			rv = (ssize_t)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EALREADY && errno != EINTR &&
			    errno != ESRCH) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark failed");
			}
			return;
		default:
			rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EINTR) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark_all failed");
			}
			break;
		}
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_init_stub(attr);

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_destroy_stub(attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = MUTEX_GET_TYPE(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_settype_stub(attr, type);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		MUTEX_SET_TYPE(attr->ptma_private, type);
		return 0;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*proto = MUTEX_GET_PROTOCOL(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_PROTECT:
		MUTEX_SET_PROTOCOL(attr->ptma_private, proto);
		return 0;
	case PTHREAD_PRIO_INHERIT:
		return ENOTSUP;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*ceil = MUTEX_GET_CEILING(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (ceil & ~0xff)
		return EINVAL;

	MUTEX_SET_CEILING(attr->ptma_private, ceil);
	return 0;
}
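/*
 * Usage illustration (not compiled): configuring a priority-protected
 * mutex through the attribute interface above.  A minimal sketch; the
 * function name is hypothetical, the ceiling value 10 is an arbitrary
 * example, and error handling is elided.
 */
#if 0
static void
example_prio_protect(pthread_mutex_t *mtx)
{
	pthread_mutexattr_t ma;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&ma, 10);
	pthread_mutex_init(mtx, &ma);
	pthread_mutexattr_destroy(&ma);

	/*
	 * While this mutex is held, the owner runs at least at the
	 * ceiling priority (via _sched_protect() in the lock path).
	 */
}
#endif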
#ifdef _PTHREAD_PSHARED
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr,
    int * __restrict pshared)
{

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

/*
 * pthread__mutex_deferwake: try to defer unparking threads in self->pt_waiters
 *
 * In order to avoid unnecessary contention on the interlocking mutex,
 * we defer waking up threads until we unlock the mutex.  The threads will
 * be woken up when the calling thread (self) releases the first mutex with
 * MUTEX_DEFERRED_BIT set.  It will likely be the mutex 'ptm', but this is
 * harmless even if it is not.
 */

void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm)
{

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
		self->pt_nwaiters = 0;
	} else {
		atomic_or_ulong((volatile unsigned long *)
		    (uintptr_t)&ptm->ptm_owner,
		    (unsigned long)MUTEX_DEFERRED_BIT);
	}
}

int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{
	*ceil = ptm->ptm_ceiling;
	return 0;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
{
	int error;

	error = pthread_mutex_lock(ptm);
	if (error == 0) {
		*old_ceil = ptm->ptm_ceiling;
		/* check range */
		ptm->ptm_ceiling = ceil;
		pthread_mutex_unlock(ptm);
	}
	return error;
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}
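/*
 * Usage illustration (not compiled): the non-portable ownership query
 * above, e.g. for assertions inside code that must be called with a
 * given mutex held.  A minimal sketch; 'cache_lock' and
 * need_cache_locked() are hypothetical and the assert would need
 * <assert.h>.
 */
#if 0
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void
need_cache_locked(void)
{

	assert(pthread_mutex_held_np(&cache_lock));
}
#endif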