/*	$OpenBSD: rthread_sync.c,v 1.3 2017/08/15 07:06:29 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Mutexes and conditions - synchronization functions.
 */

#include <assert.h>
#include <errno.h>
#include <limits.h>		/* INT_MAX, for the recursion-count check */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rthread.h"
#include "cancel.h"		/* in libc/include */

static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;

/*
 * mutexen
 */
int
pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
{
	struct pthread_mutex *mutex;

	mutex = calloc(1, sizeof(*mutex));
	if (!mutex)
		return (errno);
	mutex->lock = _SPINLOCK_UNLOCKED;
	TAILQ_INIT(&mutex->lockers);
	if (attr == NULL) {
		mutex->type = PTHREAD_MUTEX_DEFAULT;
		mutex->prioceiling = -1;
	} else {
		mutex->type = (*attr)->ma_type;
		mutex->prioceiling = (*attr)->ma_protocol ==
		    PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
	}
	*mutexp = mutex;

	return (0);
}
DEF_STRONG(pthread_mutex_init);

int
pthread_mutex_destroy(pthread_mutex_t *mutexp)
{
	struct pthread_mutex *mutex;

	assert(mutexp);
	mutex = (struct pthread_mutex *)*mutexp;
	if (mutex) {
		if (mutex->count || mutex->owner != NULL ||
		    !TAILQ_EMPTY(&mutex->lockers)) {
#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free(mutex);
		*mutexp = NULL;
	}
	return (0);
}
DEF_STRONG(pthread_mutex_destroy);

static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization.  Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner?  handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE) {
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

			/* self-deadlock, possibly until timeout */
			while (__thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			return (ETIMEDOUT);
		}
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
		while (mutex->owner != self) {
			ret = __thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}
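
/*
 * Public entry points.  pthread_mutex_lock(), _trylock(), and _timedlock()
 * are thin wrappers that select the trywait flag and the absolute timeout
 * for the worker above.
 */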
int
pthread_mutex_lock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 0, NULL));
}
DEF_STRONG(pthread_mutex_lock);

int
pthread_mutex_trylock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 1, NULL));
}

int
pthread_mutex_timedlock(pthread_mutex_t *p, const struct timespec *abstime)
{
	return (_rthread_mutex_lock(p, 0, abstime));
}

int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
	    (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return (0);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE)
			return (EPERM);
		else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error.  All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}
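
	/*
	 * Drop one recursion level.  On the final unlock, ownership is
	 * handed directly to the first queued waiter (if any) before that
	 * waiter is woken, so a woken waiter never has to re-contend
	 * for the mutex.
	 */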
	if (--mutex->count == 0) {
		pthread_t next;

		_spinlock(&mutex->lock);
		mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
		if (next != NULL)
			TAILQ_REMOVE(&mutex->lockers, next, waiting);
		_spinunlock(&mutex->lock);
		if (next != NULL)
			__thrwakeup(next, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);

/*
 * condition variables
 */
int
pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
{
	pthread_cond_t cond;

	cond = calloc(1, sizeof(*cond));
	if (!cond)
		return (errno);
	cond->lock = _SPINLOCK_UNLOCKED;
	TAILQ_INIT(&cond->waiters);
	if (attr == NULL)
		cond->clock = CLOCK_REALTIME;
	else
		cond->clock = (*attr)->ca_clock;
	*condp = cond;

	return (0);
}
DEF_STRONG(pthread_cond_init);

int
pthread_cond_destroy(pthread_cond_t *condp)
{
	pthread_cond_t cond;

	assert(condp);
	cond = *condp;
	if (cond) {
		if (!TAILQ_EMPTY(&cond->waiters)) {
#define MSG "pthread_cond_destroy on condvar with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free(cond);
	}
	*condp = NULL;

	return (0);
}
DEF_STRONG(pthread_cond_destroy);
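
/*
 * Wait on a condvar with an absolute timeout.  The waiter moves itself
 * from the mutex's locker queue to the condvar's queue and passes the
 * mutex to the next locker; cond_signal()/cond_broadcast() move it back.
 * Whether it returns 0 or ETIMEDOUT, or acts on a cancellation, the
 * thread first reacquires the mutex and restores its recursion count.
 */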
int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
    const struct timespec *abstime)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int rv = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(!TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, cond->clock, abstime,
		    &mutex->lock, &self->delayed_cancel);

		/*
		 * If abstime == NULL, then we're definitely waiting
		 * on the mutex instead of the condvar, and are
		 * just waiting for mutex ownership, regardless of
		 * why we woke up.
		 */
		if (abstime == NULL) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup, timeout, and cancellation) all mean that
		 * we won't be staying in the condvar queue and
		 * we'll no longer time out or be cancelable.
		 */
		abstime = NULL;
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if timeout or canceled, make note of that */
		if (error == EWOULDBLOCK)
			rv = ETIMEDOUT;
		else if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (rv);
}
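
/*
 * Untimed wait: the same protocol as pthread_cond_timedwait() above, but
 * sleeping with no absolute timeout, so only a wakeup or a cancellation
 * can pull the thread off the condvar queue.
 */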
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(!TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, 0, NULL, &mutex->lock,
		    &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}
DEF_STRONG(pthread_cond_wait);
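
/*
 * Signal moves one waiter from the condvar queue to the mutex queue.
 * It is woken immediately only if the mutex happens to be free; otherwise
 * it wakes later, when pthread_mutex_unlock() hands it the mutex, which
 * avoids waking a thread just so it can block again.
 */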
int
pthread_cond_signal(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return (0);

	cond = *condp;
	_rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	assert(thread->blocking_cond == cond);
	TAILQ_REMOVE(&cond->waiters, thread, waiting);
	thread->blocking_cond = NULL;

	mutex = cond->mutex;
	assert(mutex != NULL);
	if (TAILQ_EMPTY(&cond->waiters))
		cond->mutex = NULL;

	/* link locks to prevent race with timedwait */
	_spinlock(&mutex->lock);
	_spinunlock(&cond->lock);

	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	if (wakeup)
		mutex->owner = thread;
	else
		TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
	_spinunlock(&mutex->lock);
	if (wakeup)
		__thrwakeup(thread, 1);

	return (0);
}
DEF_STRONG(pthread_cond_signal);
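
/*
 * Broadcast splices the condvar's entire queue onto the mutex's locker
 * queue in constant time and, like signal, wakes at most one thread:
 * the rest are woken one at a time as the mutex is passed along by
 * pthread_mutex_unlock().
 */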
665 */ 666 667 /* 1) attach the first thread to the end of the mutex's list */ 668 _spinlock(&mutex->lock); 669 wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers); 670 thread->waiting.tqe_prev = mutex->lockers.tqh_last; 671 *(mutex->lockers.tqh_last) = thread; 672 673 /* 2) fix up the end pointer for the mutex's list */ 674 mutex->lockers.tqh_last = cond->waiters.tqh_last; 675 676 if (wakeup) { 677 TAILQ_REMOVE(&mutex->lockers, thread, waiting); 678 mutex->owner = thread; 679 _spinunlock(&mutex->lock); 680 __thrwakeup(thread, 1); 681 } else 682 _spinunlock(&mutex->lock); 683 684 /* 3) reset the condvar's list and mutex pointer */ 685 TAILQ_INIT(&cond->waiters); 686 assert(cond->mutex != NULL); 687 cond->mutex = NULL; 688 _spinunlock(&cond->lock); 689 690 return (0); 691 } 692 DEF_STRONG(pthread_cond_broadcast); 693