1 /* $NetBSD: sys_futex.c,v 1.4 2020/04/27 23:54:43 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2018, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_futex.c,v 1.4 2020/04/27 23:54:43 riastradh Exp $");
34
35 /*
36 * Futexes
37 *
38 * The futex system call coordinates notifying threads waiting for
39 * changes on a 32-bit word of memory. The word can be managed by
40 * CPU atomic operations in userland, without system calls, as long
41 * as there is no contention.
42 *
43 * The simplest use case demonstrating the utility is:
44 *
45 * // 32-bit word of memory shared among threads or
46 * // processes in userland. lock & 1 means owned;
47 * // lock & 2 means there are waiters waiting.
48 * volatile int lock = 0;
49 *
50 * int v;
51 *
52 * // Acquire a lock.
53 * do {
54 * v = lock;
55 * if (v & 1) {
56 * // Lock is held. Set a bit to say that
57 * // there are waiters, and wait for lock
58 * // to change to anything other than v;
59 * // then retry.
60 * if (atomic_cas_uint(&lock, v, v | 2) != v)
61 * continue;
62 * futex(FUTEX_WAIT, &lock, v | 2, NULL, NULL, 0);
63 * continue;
64 * }
65 * } while (atomic_cas_uint(&lock, v, v | 1) != v);
66 * membar_enter();
67 *
68 * ...
69 *
70 * // Release the lock. Optimistically assume there are
71 * // no waiters first until demonstrated otherwise.
72 * membar_exit();
73 * if (atomic_cas_uint(&lock, 1, 0) != 1) {
74 * // There may be waiters.
75 * v = atomic_swap_uint(&lock, 0);
76 * // If there are still waiters, wake one.
77 * if (v & 2)
78 * futex(FUTEX_WAKE, &lock, 1, NULL, NULL, 0);
79 * }
80 *
81 * The goal is to avoid the futex system call unless there is
82 * contention; then if there is contention, to guarantee no missed
83 * wakeups.
84 *
85 * For a simple implementation, futex(FUTEX_WAIT) could queue
86 * itself to be woken, double-check the lock word, and then sleep;
87 * spurious wakeups are generally a fact of life, so any
88 * FUTEX_WAKE could just wake every FUTEX_WAIT in the system.
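 *
 * Because wakeups may thus be spurious, a userland waiter must
 * always re-check the word after futex(FUTEX_WAIT) returns and
 * sleep again if it has not changed, e.g. (illustrative sketch
 * only):
 *
 *	while (lock == v)
 *		futex(FUTEX_WAIT, &lock, v, NULL, NULL, 0);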
89 * 90 * If this were all there is to it, we could then increase 91 * parallelism by refining the approximation: partition the 92 * waiters into buckets by hashing the lock addresses to reduce 93 * the incidence of spurious wakeups. But this is not all. 94 * 95 * The futex(FUTEX_CMP_REQUEUE, &lock, n, &lock2, m, val) 96 * operation not only wakes n waiters on lock if lock == val, but 97 * also _transfers_ m additional waiters to lock2. Unless wakeups 98 * on lock2 also trigger wakeups on lock, we cannot move waiters 99 * to lock2 if they merely share the same hash as waiters on lock. 100 * Thus, we can't approximately distribute waiters into queues by 101 * a hash function; we must distinguish futex queues exactly by 102 * lock address. 103 * 104 * For now, we use a global red/black tree to index futexes. This 105 * should be replaced by a lockless radix tree with a thread to 106 * free entries no longer in use once all lookups on all CPUs have 107 * completed. 108 * 109 * Specifically, we maintain two maps: 110 * 111 * futex_tab.va[vmspace, va] for private futexes 112 * futex_tab.oa[uvm_voaddr] for shared futexes 113 * 114 * This implementation does not support priority inheritance. 115 */ 116 117 #include <sys/types.h> 118 #include <sys/atomic.h> 119 #include <sys/condvar.h> 120 #include <sys/futex.h> 121 #include <sys/mutex.h> 122 #include <sys/rbtree.h> 123 #include <sys/queue.h> 124 125 #include <sys/syscall.h> 126 #include <sys/syscallargs.h> 127 #include <sys/syscallvar.h> 128 129 #include <uvm/uvm_extern.h> 130 131 /* 132 * Lock order: 133 * 134 * futex_tab.lock 135 * futex::fx_qlock ordered by kva of struct futex 136 * -> futex_wait::fw_lock only one at a time 137 * futex_wait::fw_lock only one at a time 138 * -> futex::fx_abortlock only one at a time 139 */ 140 141 /* 142 * union futex_key 143 * 144 * A futex is addressed either by a vmspace+va (private) or by 145 * a uvm_voaddr (shared). 146 */ 147 union futex_key { 148 struct { 149 struct vmspace *vmspace; 150 vaddr_t va; 151 } fk_private; 152 struct uvm_voaddr fk_shared; 153 }; 154 155 /* 156 * struct futex 157 * 158 * Kernel state for a futex located at a particular address in a 159 * particular virtual address space. 160 * 161 * N.B. fx_refcnt is an unsigned long because we need to be able 162 * to operate on it atomically on all systems while at the same 163 * time rendering practically impossible the chance of it reaching 164 * its max value. In practice, we're limited by the number of LWPs 165 * that can be present on the system at any given time, and the 166 * assumption is that limit will be good enough on a 32-bit platform. 167 * See futex_wake() for why overflow needs to be avoided. 168 */ 169 struct futex { 170 union futex_key fx_key; 171 unsigned long fx_refcnt; 172 bool fx_shared; 173 bool fx_on_tree; 174 struct rb_node fx_node; 175 176 kmutex_t fx_qlock; 177 TAILQ_HEAD(, futex_wait) fx_queue; 178 179 kmutex_t fx_abortlock; 180 LIST_HEAD(, futex_wait) fx_abortlist; 181 kcondvar_t fx_abortcv; 182 }; 183 184 /* 185 * struct futex_wait 186 * 187 * State for a thread to wait on a futex. Threads wait on fw_cv 188 * for fw_bitset to be set to zero. The thread may transition to 189 * a different futex queue at any time under the futex's lock. 
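 *
 * The bit set is consulted only on wakeup: futex_wake() passes
 * over any waiter whose fw_bitset has no bits in common with the
 * wake bitset. For example (illustrative), a waiter enqueued by
 * FUTEX_WAIT_BITSET with bitset 0x1 is skipped by FUTEX_WAKE_BITSET
 * with bitset 0x2, but woken by bitset 0x3 or FUTEX_BITSET_MATCH_ANY.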
190 */
191 struct futex_wait {
192 kmutex_t fw_lock;
193 kcondvar_t fw_cv;
194 struct futex *fw_futex;
195 TAILQ_ENTRY(futex_wait) fw_entry; /* queue lock */
196 LIST_ENTRY(futex_wait) fw_abort; /* queue abortlock */
197 int fw_bitset;
198 bool fw_aborting; /* fw_lock */
199 };
200
201 /*
202 * futex_tab
203 *
204 * Global trees of futexes by vmspace/va and VM object address.
205 *
206 * XXX This obviously doesn't scale in parallel. We could use a
207 * pserialize-safe data structure, but there may be a high cost to
208 * frequent deletion since we don't cache futexes after we're done
209 * with them. We could use hashed locks. But for now, just make
210 * sure userland can't DoS the serial performance, by using a
211 * balanced binary tree for lookup.
212 *
213 * XXX We could use a per-process tree for the table indexed by
214 * virtual address to reduce contention between processes.
215 */
216 static struct {
217 kmutex_t lock;
218 struct rb_tree va;
219 struct rb_tree oa;
220 } futex_tab __cacheline_aligned;
221
222 static int
223 compare_futex_key(void *cookie, const void *n, const void *k)
224 {
225 const struct futex *fa = n;
226 const union futex_key *fka = &fa->fx_key;
227 const union futex_key *fkb = k;
228
229 if ((uintptr_t)fka->fk_private.vmspace <
230 (uintptr_t)fkb->fk_private.vmspace)
231 return -1;
232 if ((uintptr_t)fka->fk_private.vmspace >
233 (uintptr_t)fkb->fk_private.vmspace)
234 return +1;
235 if (fka->fk_private.va < fkb->fk_private.va)
236 return -1;
237 if (fka->fk_private.va > fkb->fk_private.va)
238 return +1;
239 return 0;
240 }
241
242 static int
243 compare_futex(void *cookie, const void *na, const void *nb)
244 {
245 const struct futex *fa = na;
246 const struct futex *fb = nb;
247
248 return compare_futex_key(cookie, fa, &fb->fx_key);
249 }
250
251 static const rb_tree_ops_t futex_rb_ops = {
252 .rbto_compare_nodes = compare_futex,
253 .rbto_compare_key = compare_futex_key,
254 .rbto_node_offset = offsetof(struct futex, fx_node),
255 };
256
257 static int
258 compare_futex_shared_key(void *cookie, const void *n, const void *k)
259 {
260 const struct futex *fa = n;
261 const union futex_key *fka = &fa->fx_key;
262 const union futex_key *fkb = k;
263
264 return uvm_voaddr_compare(&fka->fk_shared, &fkb->fk_shared);
265 }
266
267 static int
268 compare_futex_shared(void *cookie, const void *na, const void *nb)
269 {
270 const struct futex *fa = na;
271 const struct futex *fb = nb;
272
273 return compare_futex_shared_key(cookie, fa, &fb->fx_key);
274 }
275
276 static const rb_tree_ops_t futex_shared_rb_ops = {
277 .rbto_compare_nodes = compare_futex_shared,
278 .rbto_compare_key = compare_futex_shared_key,
279 .rbto_node_offset = offsetof(struct futex, fx_node),
280 };
281
282 static void futex_wait_dequeue(struct futex_wait *, struct futex *);
283
284 /*
285 * futex_load(uaddr, kaddr)
286 *
287 * Perform a single atomic load to read *uaddr, and return the
288 * result in *kaddr. Return 0 on success, EFAULT if uaddr is not
289 * mapped.
290 */
291 static inline int
292 futex_load(int *uaddr, int *kaddr)
293 {
294 return ufetch_int((u_int *)uaddr, (u_int *)kaddr);
295 }
296
297 /*
298 * futex_test(uaddr, expected)
299 *
300 * True if *uaddr == expected. False if *uaddr != expected, or if
301 * uaddr is not mapped.
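 *
 * Note that an unmapped uaddr reads as "not equal", so callers
 * such as futex_func_wait() fail with EAGAIN rather than EFAULT
 * in that case.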
302 */ 303 static bool 304 futex_test(int *uaddr, int expected) 305 { 306 int val; 307 int error; 308 309 error = futex_load(uaddr, &val); 310 if (error) 311 return false; 312 return val == expected; 313 } 314 315 /* 316 * futex_sys_init() 317 * 318 * Initialize the futex subsystem. 319 */ 320 void 321 futex_sys_init(void) 322 { 323 324 mutex_init(&futex_tab.lock, MUTEX_DEFAULT, IPL_NONE); 325 rb_tree_init(&futex_tab.va, &futex_rb_ops); 326 rb_tree_init(&futex_tab.oa, &futex_shared_rb_ops); 327 } 328 329 /* 330 * futex_sys_fini() 331 * 332 * Finalize the futex subsystem. 333 */ 334 void 335 futex_sys_fini(void) 336 { 337 338 KASSERT(RB_TREE_MIN(&futex_tab.oa) == NULL); 339 KASSERT(RB_TREE_MIN(&futex_tab.va) == NULL); 340 mutex_destroy(&futex_tab.lock); 341 } 342 343 /* 344 * futex_queue_init(f) 345 * 346 * Initialize the futex queue. Caller must call futex_queue_fini 347 * when done. 348 * 349 * Never sleeps. 350 */ 351 static void 352 futex_queue_init(struct futex *f) 353 { 354 355 mutex_init(&f->fx_qlock, MUTEX_DEFAULT, IPL_NONE); 356 mutex_init(&f->fx_abortlock, MUTEX_DEFAULT, IPL_NONE); 357 cv_init(&f->fx_abortcv, "fqabort"); 358 LIST_INIT(&f->fx_abortlist); 359 TAILQ_INIT(&f->fx_queue); 360 } 361 362 /* 363 * futex_queue_drain(f) 364 * 365 * Wait for any aborting waiters in f; then empty the queue of 366 * any stragglers and wake them. Caller must guarantee no new 367 * references to f. 368 * 369 * May sleep. 370 */ 371 static void 372 futex_queue_drain(struct futex *f) 373 { 374 struct futex_wait *fw, *fw_next; 375 376 mutex_enter(&f->fx_abortlock); 377 while (!LIST_EMPTY(&f->fx_abortlist)) 378 cv_wait(&f->fx_abortcv, &f->fx_abortlock); 379 mutex_exit(&f->fx_abortlock); 380 381 mutex_enter(&f->fx_qlock); 382 TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) { 383 mutex_enter(&fw->fw_lock); 384 futex_wait_dequeue(fw, f); 385 cv_broadcast(&fw->fw_cv); 386 mutex_exit(&fw->fw_lock); 387 } 388 mutex_exit(&f->fx_qlock); 389 } 390 391 /* 392 * futex_queue_fini(fq) 393 * 394 * Finalize the futex queue initialized by futex_queue_init. Queue 395 * must be empty. Caller must not use f again until a subsequent 396 * futex_queue_init. 397 */ 398 static void 399 futex_queue_fini(struct futex *f) 400 { 401 402 KASSERT(TAILQ_EMPTY(&f->fx_queue)); 403 KASSERT(LIST_EMPTY(&f->fx_abortlist)); 404 mutex_destroy(&f->fx_qlock); 405 mutex_destroy(&f->fx_abortlock); 406 cv_destroy(&f->fx_abortcv); 407 } 408 409 /* 410 * futex_key_init(key, vm, va, shared) 411 * 412 * Initialize a futex key for lookup, etc. 413 */ 414 static int 415 futex_key_init(union futex_key *fk, struct vmspace *vm, vaddr_t va, bool shared) 416 { 417 int error = 0; 418 419 if (__predict_false(shared)) { 420 if (!uvm_voaddr_acquire(&vm->vm_map, va, &fk->fk_shared)) 421 error = EFAULT; 422 } else { 423 fk->fk_private.vmspace = vm; 424 fk->fk_private.va = va; 425 } 426 427 return error; 428 } 429 430 /* 431 * futex_key_fini(key, shared) 432 * 433 * Release a futex key. 434 */ 435 static void 436 futex_key_fini(union futex_key *fk, bool shared) 437 { 438 if (__predict_false(shared)) 439 uvm_voaddr_release(&fk->fk_shared); 440 memset(fk, 0, sizeof(*fk)); 441 } 442 443 /* 444 * futex_create(fk, shared) 445 * 446 * Create a futex. Initial reference count is 1, representing the 447 * caller. Returns NULL on failure. Always takes ownership of the 448 * key, either transferring it to the newly-created futex, or releasing 449 * the key if creation fails. 450 * 451 * Never sleeps for memory, but may sleep to acquire a lock. 
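 *
 * The caller therefore must not touch the key again either way,
 * e.g. (sketch of the pattern used by futex_get() below):
 *
 *	f = futex_create(&fk, shared);
 *	if (f == NULL)
 *		return ENOMEM;	/* fk already released */
 *	/* fk is now owned by f */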
452 */ 453 static struct futex * 454 futex_create(union futex_key *fk, bool shared) 455 { 456 struct futex *f; 457 458 f = kmem_alloc(sizeof(*f), KM_NOSLEEP); 459 if (f == NULL) { 460 futex_key_fini(fk, shared); 461 return NULL; 462 } 463 f->fx_key = *fk; 464 f->fx_refcnt = 1; 465 f->fx_shared = shared; 466 f->fx_on_tree = false; 467 futex_queue_init(f); 468 469 return f; 470 } 471 472 /* 473 * futex_destroy(f) 474 * 475 * Destroy a futex created with futex_create. Reference count 476 * must be zero. 477 * 478 * May sleep. 479 */ 480 static void 481 futex_destroy(struct futex *f) 482 { 483 484 ASSERT_SLEEPABLE(); 485 486 KASSERT(atomic_load_relaxed(&f->fx_refcnt) == 0); 487 KASSERT(!f->fx_on_tree); 488 489 /* Drain and destroy the private queue. */ 490 futex_queue_drain(f); 491 futex_queue_fini(f); 492 493 futex_key_fini(&f->fx_key, f->fx_shared); 494 495 kmem_free(f, sizeof(*f)); 496 } 497 498 /* 499 * futex_hold(f) 500 * 501 * Attempt to acquire a reference to f. Return 0 on success, 502 * ENFILE on too many references. 503 * 504 * Never sleeps. 505 */ 506 static int 507 futex_hold(struct futex *f) 508 { 509 unsigned long refcnt; 510 511 do { 512 refcnt = atomic_load_relaxed(&f->fx_refcnt); 513 if (refcnt == ULONG_MAX) 514 return ENFILE; 515 } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt + 1) != refcnt); 516 517 return 0; 518 } 519 520 /* 521 * futex_rele(f) 522 * 523 * Release a reference to f acquired with futex_create or 524 * futex_hold. 525 * 526 * May sleep to free f. 527 */ 528 static void 529 futex_rele(struct futex *f) 530 { 531 unsigned long refcnt; 532 533 ASSERT_SLEEPABLE(); 534 535 do { 536 refcnt = atomic_load_relaxed(&f->fx_refcnt); 537 if (refcnt == 1) 538 goto trylast; 539 } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt); 540 return; 541 542 trylast: 543 mutex_enter(&futex_tab.lock); 544 if (atomic_dec_ulong_nv(&f->fx_refcnt) == 0) { 545 if (f->fx_on_tree) { 546 if (__predict_false(f->fx_shared)) 547 rb_tree_remove_node(&futex_tab.oa, f); 548 else 549 rb_tree_remove_node(&futex_tab.va, f); 550 f->fx_on_tree = false; 551 } 552 } else { 553 /* References remain -- don't destroy it. */ 554 f = NULL; 555 } 556 mutex_exit(&futex_tab.lock); 557 if (f != NULL) 558 futex_destroy(f); 559 } 560 561 /* 562 * futex_rele_not_last(f) 563 * 564 * Release a reference to f acquired with futex_create or 565 * futex_hold. 566 * 567 * This version asserts that we are not dropping the last 568 * reference to f. 569 */ 570 static void 571 futex_rele_not_last(struct futex *f) 572 { 573 unsigned long refcnt; 574 575 do { 576 refcnt = atomic_load_relaxed(&f->fx_refcnt); 577 KASSERT(refcnt > 1); 578 } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt); 579 } 580 581 /* 582 * futex_lookup_by_key(key, shared, &f) 583 * 584 * Try to find an existing futex va reference in the specified key 585 * On success, return 0, set f to found futex or to NULL if not found, 586 * and increment f's reference count if found. 587 * 588 * Return ENFILE if reference count too high. 589 * 590 * Internal lookup routine shared by futex_lookup() and 591 * futex_get(). 
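 *
 * A zero return with f set to NULL is not an error; it simply
 * means no futex exists for this key yet, so callers must check
 * both the error and the pointer.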
592 */ 593 static int 594 futex_lookup_by_key(union futex_key *fk, bool shared, struct futex **fp) 595 { 596 struct futex *f; 597 int error = 0; 598 599 mutex_enter(&futex_tab.lock); 600 if (__predict_false(shared)) { 601 f = rb_tree_find_node(&futex_tab.oa, fk); 602 } else { 603 f = rb_tree_find_node(&futex_tab.va, fk); 604 } 605 if (f) { 606 error = futex_hold(f); 607 if (error) 608 f = NULL; 609 } 610 *fp = f; 611 mutex_exit(&futex_tab.lock); 612 613 return error; 614 } 615 616 /* 617 * futex_insert(f, fp) 618 * 619 * Try to insert the futex f into the tree by va. If there 620 * already is a futex for its va, acquire a reference to it, and 621 * store it in *fp; otherwise store f in *fp. 622 * 623 * Return 0 on success, ENFILE if there already is a futex but its 624 * reference count is too high. 625 */ 626 static int 627 futex_insert(struct futex *f, struct futex **fp) 628 { 629 struct futex *f0; 630 int error; 631 632 KASSERT(atomic_load_relaxed(&f->fx_refcnt) != 0); 633 KASSERT(!f->fx_on_tree); 634 635 mutex_enter(&futex_tab.lock); 636 if (__predict_false(f->fx_shared)) 637 f0 = rb_tree_insert_node(&futex_tab.oa, f); 638 else 639 f0 = rb_tree_insert_node(&futex_tab.va, f); 640 if (f0 == f) { 641 f->fx_on_tree = true; 642 error = 0; 643 } else { 644 KASSERT(atomic_load_relaxed(&f0->fx_refcnt) != 0); 645 KASSERT(f0->fx_on_tree); 646 error = futex_hold(f0); 647 if (error) 648 goto out; 649 } 650 *fp = f0; 651 out: mutex_exit(&futex_tab.lock); 652 653 return error; 654 } 655 656 /* 657 * futex_lookup(uaddr, shared, &f) 658 * 659 * Find a futex at the userland pointer uaddr in the current 660 * process's VM space. On success, return the futex in f and 661 * increment its reference count. 662 * 663 * Caller must call futex_put when done. 664 */ 665 static int 666 futex_lookup(int *uaddr, bool shared, struct futex **fp) 667 { 668 union futex_key fk; 669 struct vmspace *vm = curproc->p_vmspace; 670 vaddr_t va = (vaddr_t)uaddr; 671 int error; 672 673 /* 674 * Reject unaligned user pointers so we don't cross page 675 * boundaries and so atomics will work. 676 */ 677 if ((va & 3) != 0) 678 return EINVAL; 679 680 /* Look it up. */ 681 error = futex_key_init(&fk, vm, va, shared); 682 if (error) 683 return error; 684 685 error = futex_lookup_by_key(&fk, shared, fp); 686 futex_key_fini(&fk, shared); 687 if (error) 688 return error; 689 690 KASSERT(*fp == NULL || (*fp)->fx_shared == shared); 691 KASSERT(*fp == NULL || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0); 692 693 /* 694 * Success! (Caller must still check whether we found 695 * anything, but nothing went _wrong_ like trying to use 696 * unmapped memory.) 697 */ 698 KASSERT(error == 0); 699 700 return error; 701 } 702 703 /* 704 * futex_get(uaddr, shared, &f) 705 * 706 * Find or create a futex at the userland pointer uaddr in the 707 * current process's VM space. On success, return the futex in f 708 * and increment its reference count. 709 * 710 * Caller must call futex_put when done. 711 */ 712 static int 713 futex_get(int *uaddr, bool shared, struct futex **fp) 714 { 715 union futex_key fk; 716 struct vmspace *vm = curproc->p_vmspace; 717 struct futex *f = NULL; 718 vaddr_t va = (vaddr_t)uaddr; 719 int error; 720 721 /* 722 * Reject unaligned user pointers so we don't cross page 723 * boundaries and so atomics will work. 724 */ 725 if ((va & 3) != 0) 726 return EINVAL; 727 728 error = futex_key_init(&fk, vm, va, shared); 729 if (error) 730 return error; 731 732 /* 733 * Optimistically assume there already is one, and try to find 734 * it. 
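 * If the lookup misses, we create a new futex and try to insert
 * it; if another thread beat us to the insert, futex_insert()
 * hands back the existing futex instead and our new one is
 * released at "out" below.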
735 */
736 error = futex_lookup_by_key(&fk, shared, fp);
737 if (error || *fp != NULL) {
738 /*
739 * We either found one, or there was an error.
740 * In either case, we are done with the key.
741 */
742 futex_key_fini(&fk, shared);
743 goto out;
744 }
745
746 /*
747 * Create a futex record. This transfers ownership of the key
748 * in all cases.
749 */
750 f = futex_create(&fk, shared);
751 if (f == NULL) {
752 error = ENOMEM;
753 goto out;
754 }
755
756 /*
757 * Insert our new futex, or use existing if someone else beat
758 * us to it.
759 */
760 error = futex_insert(f, fp);
761 if (error)
762 goto out;
763 if (*fp == f)
764 f = NULL; /* don't release on exit */
765
766 /* Success! */
767 KASSERT(error == 0);
768
769 out: if (f != NULL)
770 futex_rele(f);
771 KASSERT(error || *fp != NULL);
772 KASSERT(error || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);
773 return error;
774 }
775
776 /*
777 * futex_put(f)
778 *
779 * Release a futex acquired with futex_get or futex_lookup.
780 */
781 static void
782 futex_put(struct futex *f)
783 {
784
785 futex_rele(f);
786 }
787
788 /*
789 * futex_wait_init(fw, bitset)
790 *
791 * Initialize a record for a thread to wait on a futex matching
792 * the specified bit set. Should be passed to futex_wait_enqueue
793 * before futex_wait, and should be passed to futex_wait_fini when
794 * done.
795 */
796 static void
797 futex_wait_init(struct futex_wait *fw, int bitset)
798 {
799
800 mutex_init(&fw->fw_lock, MUTEX_DEFAULT, IPL_NONE);
801 cv_init(&fw->fw_cv, "futex");
802 fw->fw_futex = NULL;
803 fw->fw_bitset = bitset;
804 fw->fw_aborting = false;
805 }
806
807 /*
808 * futex_wait_fini(fw)
809 *
810 * Finalize a record for a futex waiter. Must not be on any
811 * futex's queue.
812 */
813 static void
814 futex_wait_fini(struct futex_wait *fw)
815 {
816
817 cv_destroy(&fw->fw_cv);
818 mutex_destroy(&fw->fw_lock);
819 }
820
821 /*
822 * futex_wait_enqueue(fw, f)
823 *
824 * Put fw on the futex queue. Must be done before futex_wait.
825 * Caller must hold fw's lock and f's lock, and fw must not be on
826 * any existing futex's waiter list.
827 */
828 static void
829 futex_wait_enqueue(struct futex_wait *fw, struct futex *f)
830 {
831
832 KASSERT(mutex_owned(&f->fx_qlock));
833 KASSERT(mutex_owned(&fw->fw_lock));
834 KASSERT(fw->fw_futex == NULL);
835 KASSERT(!fw->fw_aborting);
836
837 fw->fw_futex = f;
838 TAILQ_INSERT_TAIL(&f->fx_queue, fw, fw_entry);
839 }
840
841 /*
842 * futex_wait_dequeue(fw, f)
843 *
844 * Remove fw from the futex queue. Precludes subsequent
845 * futex_wait until a futex_wait_enqueue. Caller must hold fw's
846 * lock and f's lock, and fw must be on f.
847 */
848 static void
849 futex_wait_dequeue(struct futex_wait *fw, struct futex *f)
850 {
851
852 KASSERT(mutex_owned(&f->fx_qlock));
853 KASSERT(mutex_owned(&fw->fw_lock));
854 KASSERT(fw->fw_futex == f);
855
856 TAILQ_REMOVE(&f->fx_queue, fw, fw_entry);
857 fw->fw_futex = NULL;
858 }
859
860 /*
861 * futex_wait_abort(fw)
862 *
863 * Caller is no longer waiting for fw. Remove it from any queue
864 * if it was on one. Caller must hold fw->fw_lock.
865 */
866 static void
867 futex_wait_abort(struct futex_wait *fw)
868 {
869 struct futex *f;
870
871 KASSERT(mutex_owned(&fw->fw_lock));
872
873 /*
874 * Grab the futex queue. It can't go away as long as we hold
875 * fw_lock. However, we can't take the queue lock because
876 * that's a lock order reversal.
877 */
878 f = fw->fw_futex;
879
880 /* Put us on the abort list so that f won't go away.
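 * futex_queue_drain() waits for this list to empty before the
 * futex can be destroyed, and futex_wake() skips any waiter with
 * fw_aborting set, so we cannot be woken or requeued while we
 * reacquire the locks in the proper order below.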
*/ 881 mutex_enter(&f->fx_abortlock); 882 LIST_INSERT_HEAD(&f->fx_abortlist, fw, fw_abort); 883 mutex_exit(&f->fx_abortlock); 884 885 /* 886 * Mark fw as aborting so it won't lose wakeups and won't be 887 * transferred to any other queue. 888 */ 889 fw->fw_aborting = true; 890 891 /* f is now stable, so we can release fw_lock. */ 892 mutex_exit(&fw->fw_lock); 893 894 /* Now we can remove fw under the queue lock. */ 895 mutex_enter(&f->fx_qlock); 896 mutex_enter(&fw->fw_lock); 897 futex_wait_dequeue(fw, f); 898 mutex_exit(&fw->fw_lock); 899 mutex_exit(&f->fx_qlock); 900 901 /* 902 * Finally, remove us from the abort list and notify anyone 903 * waiting for the abort to complete if we were the last to go. 904 */ 905 mutex_enter(&f->fx_abortlock); 906 LIST_REMOVE(fw, fw_abort); 907 if (LIST_EMPTY(&f->fx_abortlist)) 908 cv_broadcast(&f->fx_abortcv); 909 mutex_exit(&f->fx_abortlock); 910 911 /* 912 * Release our reference to the futex now that we are not 913 * waiting for it. 914 */ 915 futex_rele(f); 916 917 /* 918 * Reacquire the fw lock as caller expects. Verify that we're 919 * aborting and no longer associated with a futex. 920 */ 921 mutex_enter(&fw->fw_lock); 922 KASSERT(fw->fw_aborting); 923 KASSERT(fw->fw_futex == NULL); 924 } 925 926 /* 927 * futex_wait(fw, deadline, clkid) 928 * 929 * fw must be a waiter on a futex's queue. Wait until deadline on 930 * the clock clkid, or forever if deadline is NULL, for a futex 931 * wakeup. Return 0 on explicit wakeup or destruction of futex, 932 * ETIMEDOUT on timeout, EINTR/ERESTART on signal. Either way, fw 933 * will no longer be on a futex queue on return. 934 */ 935 static int 936 futex_wait(struct futex_wait *fw, const struct timespec *deadline, 937 clockid_t clkid) 938 { 939 int error = 0; 940 941 /* Test and wait under the wait lock. */ 942 mutex_enter(&fw->fw_lock); 943 944 for (;;) { 945 /* If we're done yet, stop and report success. */ 946 if (fw->fw_bitset == 0 || fw->fw_futex == NULL) { 947 error = 0; 948 break; 949 } 950 951 /* If anything went wrong in the last iteration, stop. */ 952 if (error) 953 break; 954 955 /* Not done yet. Wait. */ 956 if (deadline) { 957 struct timespec ts; 958 959 /* Check our watch. */ 960 error = clock_gettime1(clkid, &ts); 961 if (error) 962 break; 963 964 /* If we're past the deadline, ETIMEDOUT. */ 965 if (timespeccmp(deadline, &ts, <=)) { 966 error = ETIMEDOUT; 967 break; 968 } 969 970 /* Count how much time is left. */ 971 timespecsub(deadline, &ts, &ts); 972 973 /* Wait for that much time, allowing signals. */ 974 error = cv_timedwait_sig(&fw->fw_cv, &fw->fw_lock, 975 tstohz(&ts)); 976 } else { 977 /* Wait indefinitely, allowing signals. */ 978 error = cv_wait_sig(&fw->fw_cv, &fw->fw_lock); 979 } 980 } 981 982 /* 983 * If we were woken up, the waker will have removed fw from the 984 * queue. But if anything went wrong, we must remove fw from 985 * the queue ourselves. While here, convert EWOULDBLOCK to 986 * ETIMEDOUT. 987 */ 988 if (error) { 989 futex_wait_abort(fw); 990 if (error == EWOULDBLOCK) 991 error = ETIMEDOUT; 992 } 993 994 mutex_exit(&fw->fw_lock); 995 996 return error; 997 } 998 999 /* 1000 * futex_wake(f, nwake, f2, nrequeue, bitset) 1001 * 1002 * Wake up to nwake waiters on f matching bitset; then, if f2 is 1003 * provided, move up to nrequeue remaining waiters on f matching 1004 * bitset to f2. Return the number of waiters actually woken. 1005 * Caller must hold the locks of f and f2, if provided. 
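 *
 * For example, FUTEX_WAKE is implemented as futex_wake(f, val,
 * NULL, 0, val3), and FUTEX_CMP_REQUEUE as futex_wake(f, val, f2,
 * val2, FUTEX_BITSET_MATCH_ANY); see futex_func_wake() and
 * futex_func_requeue() below.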
1006 */ 1007 static unsigned 1008 futex_wake(struct futex *f, unsigned nwake, struct futex *f2, 1009 unsigned nrequeue, int bitset) 1010 { 1011 struct futex_wait *fw, *fw_next; 1012 unsigned nwoken = 0; 1013 int hold_error __diagused; 1014 1015 KASSERT(mutex_owned(&f->fx_qlock)); 1016 KASSERT(f2 == NULL || mutex_owned(&f2->fx_qlock)); 1017 1018 /* Wake up to nwake waiters, and count the number woken. */ 1019 TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) { 1020 if ((fw->fw_bitset & bitset) == 0) 1021 continue; 1022 if (nwake > 0) { 1023 mutex_enter(&fw->fw_lock); 1024 if (__predict_false(fw->fw_aborting)) { 1025 mutex_exit(&fw->fw_lock); 1026 continue; 1027 } 1028 futex_wait_dequeue(fw, f); 1029 fw->fw_bitset = 0; 1030 cv_broadcast(&fw->fw_cv); 1031 mutex_exit(&fw->fw_lock); 1032 nwake--; 1033 nwoken++; 1034 /* 1035 * Drop the futex reference on behalf of the 1036 * waiter. We assert this is not the last 1037 * reference on the futex (our caller should 1038 * also have one). 1039 */ 1040 futex_rele_not_last(f); 1041 } else { 1042 break; 1043 } 1044 } 1045 1046 if (f2) { 1047 /* Move up to nrequeue waiters from f's queue to f2's queue. */ 1048 TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) { 1049 if ((fw->fw_bitset & bitset) == 0) 1050 continue; 1051 if (nrequeue > 0) { 1052 mutex_enter(&fw->fw_lock); 1053 if (__predict_false(fw->fw_aborting)) { 1054 mutex_exit(&fw->fw_lock); 1055 continue; 1056 } 1057 futex_wait_dequeue(fw, f); 1058 futex_wait_enqueue(fw, f2); 1059 mutex_exit(&fw->fw_lock); 1060 nrequeue--; 1061 /* 1062 * Transfer the reference from f to f2. 1063 * As above, we assert that we are not 1064 * dropping the last reference to f here. 1065 * 1066 * XXX futex_hold() could theoretically 1067 * XXX fail here. 1068 */ 1069 futex_rele_not_last(f); 1070 hold_error = futex_hold(f2); 1071 KASSERT(hold_error == 0); 1072 } else { 1073 break; 1074 } 1075 } 1076 } else { 1077 KASSERT(nrequeue == 0); 1078 } 1079 1080 /* Return the number of waiters woken. */ 1081 return nwoken; 1082 } 1083 1084 /* 1085 * futex_queue_lock(f) 1086 * 1087 * Acquire the queue lock of f. Pair with futex_queue_unlock. Do 1088 * not use if caller needs to acquire two locks; use 1089 * futex_queue_lock2 instead. 1090 */ 1091 static void 1092 futex_queue_lock(struct futex *f) 1093 { 1094 mutex_enter(&f->fx_qlock); 1095 } 1096 1097 /* 1098 * futex_queue_unlock(f) 1099 * 1100 * Release the queue lock of f. 1101 */ 1102 static void 1103 futex_queue_unlock(struct futex *f) 1104 { 1105 mutex_exit(&f->fx_qlock); 1106 } 1107 1108 /* 1109 * futex_queue_lock2(f, f2) 1110 * 1111 * Acquire the queue locks of both f and f2, which may be null, or 1112 * which may have the same underlying queue. If they are 1113 * distinct, an arbitrary total order is chosen on the locks. 1114 * 1115 * Callers should only ever acquire multiple queue locks 1116 * simultaneously using futex_queue_lock2. 1117 */ 1118 static void 1119 futex_queue_lock2(struct futex *f, struct futex *f2) 1120 { 1121 1122 /* 1123 * If both are null, do nothing; if one is null and the other 1124 * is not, lock the other and be done with it. 1125 */ 1126 if (f == NULL && f2 == NULL) { 1127 return; 1128 } else if (f == NULL) { 1129 mutex_enter(&f2->fx_qlock); 1130 return; 1131 } else if (f2 == NULL) { 1132 mutex_enter(&f->fx_qlock); 1133 return; 1134 } 1135 1136 /* If both futexes are the same, acquire only one. 
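 * This happens when uaddr and uaddr2 resolve to the same futex,
 * e.g. FUTEX_WAKE_OP called with uaddr == uaddr2; acquiring
 * fx_qlock twice would deadlock against ourselves.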
*/ 1137 if (f == f2) { 1138 mutex_enter(&f->fx_qlock); 1139 return; 1140 } 1141 1142 /* Otherwise, use the ordering on the kva of the futex pointer. */ 1143 if ((uintptr_t)f < (uintptr_t)f2) { 1144 mutex_enter(&f->fx_qlock); 1145 mutex_enter(&f2->fx_qlock); 1146 } else { 1147 mutex_enter(&f2->fx_qlock); 1148 mutex_enter(&f->fx_qlock); 1149 } 1150 } 1151 1152 /* 1153 * futex_queue_unlock2(f, f2) 1154 * 1155 * Release the queue locks of both f and f2, which may be null, or 1156 * which may have the same underlying queue. 1157 */ 1158 static void 1159 futex_queue_unlock2(struct futex *f, struct futex *f2) 1160 { 1161 1162 /* 1163 * If both are null, do nothing; if one is null and the other 1164 * is not, unlock the other and be done with it. 1165 */ 1166 if (f == NULL && f2 == NULL) { 1167 return; 1168 } else if (f == NULL) { 1169 mutex_exit(&f2->fx_qlock); 1170 return; 1171 } else if (f2 == NULL) { 1172 mutex_exit(&f->fx_qlock); 1173 return; 1174 } 1175 1176 /* If both futexes are the same, release only one. */ 1177 if (f == f2) { 1178 mutex_exit(&f->fx_qlock); 1179 return; 1180 } 1181 1182 /* Otherwise, use the ordering on the kva of the futex pointer. */ 1183 if ((uintptr_t)f < (uintptr_t)f2) { 1184 mutex_exit(&f2->fx_qlock); 1185 mutex_exit(&f->fx_qlock); 1186 } else { 1187 mutex_exit(&f->fx_qlock); 1188 mutex_exit(&f2->fx_qlock); 1189 } 1190 } 1191 1192 /* 1193 * futex_func_wait(uaddr, val, val3, timeout, clkid, clkflags, retval) 1194 * 1195 * Implement futex(FUTEX_WAIT). 1196 */ 1197 static int 1198 futex_func_wait(bool shared, int *uaddr, int val, int val3, 1199 const struct timespec *timeout, clockid_t clkid, int clkflags, 1200 register_t *retval) 1201 { 1202 struct futex *f; 1203 struct futex_wait wait, *fw = &wait; 1204 struct timespec ts; 1205 const struct timespec *deadline; 1206 int error; 1207 1208 /* Optimistically test before anything else. */ 1209 if (!futex_test(uaddr, val)) 1210 return EAGAIN; 1211 1212 /* Determine a deadline on the specified clock. */ 1213 if (timeout == NULL || (clkflags & TIMER_ABSTIME) == TIMER_ABSTIME) { 1214 deadline = timeout; 1215 } else { 1216 error = clock_gettime1(clkid, &ts); 1217 if (error) 1218 return error; 1219 timespecadd(&ts, timeout, &ts); 1220 deadline = &ts; 1221 } 1222 1223 /* Get the futex, creating it if necessary. */ 1224 error = futex_get(uaddr, shared, &f); 1225 if (error) 1226 return error; 1227 KASSERT(f); 1228 1229 /* Get ready to wait. */ 1230 futex_wait_init(fw, val3); 1231 1232 /* 1233 * Under the queue lock, check the value again: if it has 1234 * already changed, EAGAIN; otherwise enqueue the waiter. 1235 * Since FUTEX_WAKE will use the same lock and be done after 1236 * modifying the value, the order in which we check and enqueue 1237 * is immaterial. 1238 */ 1239 futex_queue_lock(f); 1240 if (!futex_test(uaddr, val)) { 1241 futex_queue_unlock(f); 1242 error = EAGAIN; 1243 goto out; 1244 } 1245 mutex_enter(&fw->fw_lock); 1246 futex_wait_enqueue(fw, f); 1247 mutex_exit(&fw->fw_lock); 1248 futex_queue_unlock(f); 1249 1250 /* 1251 * We cannot drop our reference to the futex here, because 1252 * we might be enqueued on a different one when we are awakened. 1253 * The references will be managed on our behalf in the requeue 1254 * and wake cases. 1255 */ 1256 f = NULL; 1257 1258 /* Wait. */ 1259 error = futex_wait(fw, deadline, clkid); 1260 if (error) 1261 goto out; 1262 1263 /* Return 0 on success, error on failure. 
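 * (futex_wait() guarantees fw is no longer on any futex queue at
 * this point, and our futex reference was handed off above, so
 * only fw itself needs to be finalized at "out".)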
*/ 1264 *retval = 0; 1265 1266 out: if (f != NULL) 1267 futex_put(f); 1268 futex_wait_fini(fw); 1269 return error; 1270 } 1271 1272 /* 1273 * futex_func_wake(uaddr, val, val3, retval) 1274 * 1275 * Implement futex(FUTEX_WAKE) and futex(FUTEX_WAKE_BITSET). 1276 */ 1277 static int 1278 futex_func_wake(bool shared, int *uaddr, int val, int val3, register_t *retval) 1279 { 1280 struct futex *f; 1281 unsigned int nwoken = 0; 1282 int error = 0; 1283 1284 /* Reject negative number of wakeups. */ 1285 if (val < 0) { 1286 error = EINVAL; 1287 goto out; 1288 } 1289 1290 /* Look up the futex, if any. */ 1291 error = futex_lookup(uaddr, shared, &f); 1292 if (error) 1293 goto out; 1294 1295 /* If there's no futex, there are no waiters to wake. */ 1296 if (f == NULL) 1297 goto out; 1298 1299 /* 1300 * Under f's queue lock, wake the waiters and remember the 1301 * number woken. 1302 */ 1303 futex_queue_lock(f); 1304 nwoken = futex_wake(f, val, NULL, 0, val3); 1305 futex_queue_unlock(f); 1306 1307 /* Release the futex. */ 1308 futex_put(f); 1309 1310 out: 1311 /* Return the number of waiters woken. */ 1312 *retval = nwoken; 1313 1314 /* Success! */ 1315 return error; 1316 } 1317 1318 /* 1319 * futex_func_requeue(op, uaddr, val, uaddr2, val2, val3, retval) 1320 * 1321 * Implement futex(FUTEX_REQUEUE) and futex(FUTEX_CMP_REQUEUE). 1322 */ 1323 static int 1324 futex_func_requeue(bool shared, int op, int *uaddr, int val, int *uaddr2, 1325 int val2, int val3, register_t *retval) 1326 { 1327 struct futex *f = NULL, *f2 = NULL; 1328 unsigned nwoken = 0; /* default to zero woken on early return */ 1329 int error; 1330 1331 /* Reject negative number of wakeups or requeues. */ 1332 if (val < 0 || val2 < 0) { 1333 error = EINVAL; 1334 goto out; 1335 } 1336 1337 /* Look up the source futex, if any. */ 1338 error = futex_lookup(uaddr, shared, &f); 1339 if (error) 1340 goto out; 1341 1342 /* If there is none, nothing to do. */ 1343 if (f == NULL) 1344 goto out; 1345 1346 /* 1347 * We may need to create the destination futex because it's 1348 * entirely possible it does not currently have any waiters. 1349 */ 1350 error = futex_get(uaddr2, shared, &f2); 1351 if (error) 1352 goto out; 1353 1354 /* 1355 * Under the futexes' queue locks, check the value; if 1356 * unchanged from val3, wake the waiters. 1357 */ 1358 futex_queue_lock2(f, f2); 1359 if (op == FUTEX_CMP_REQUEUE && !futex_test(uaddr, val3)) { 1360 error = EAGAIN; 1361 } else { 1362 error = 0; 1363 nwoken = futex_wake(f, val, f2, val2, FUTEX_BITSET_MATCH_ANY); 1364 } 1365 futex_queue_unlock2(f, f2); 1366 1367 out: 1368 /* Return the number of waiters woken. */ 1369 *retval = nwoken; 1370 1371 /* Release the futexes if we got them. */ 1372 if (f2) 1373 futex_put(f2); 1374 if (f) 1375 futex_put(f); 1376 return error; 1377 } 1378 1379 /* 1380 * futex_validate_op_cmp(val3) 1381 * 1382 * Validate an op/cmp argument for FUTEX_WAKE_OP. 
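 *
 * For example, an encoding with op = FUTEX_OP_ADD and cmp =
 * FUTEX_OP_CMP_GT passes, while an unknown op or cmp, or
 * FUTEX_OP_OPARG_SHIFT with a shift count outside [0, 31], is
 * rejected with EINVAL.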
1383 */ 1384 static int 1385 futex_validate_op_cmp(int val3) 1386 { 1387 int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK); 1388 int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK); 1389 1390 if (op & FUTEX_OP_OPARG_SHIFT) { 1391 int oparg = __SHIFTOUT(val3, FUTEX_OP_OPARG_MASK); 1392 if (oparg < 0) 1393 return EINVAL; 1394 if (oparg >= 32) 1395 return EINVAL; 1396 op &= ~FUTEX_OP_OPARG_SHIFT; 1397 } 1398 1399 switch (op) { 1400 case FUTEX_OP_SET: 1401 case FUTEX_OP_ADD: 1402 case FUTEX_OP_OR: 1403 case FUTEX_OP_ANDN: 1404 case FUTEX_OP_XOR: 1405 break; 1406 default: 1407 return EINVAL; 1408 } 1409 1410 switch (cmp) { 1411 case FUTEX_OP_CMP_EQ: 1412 case FUTEX_OP_CMP_NE: 1413 case FUTEX_OP_CMP_LT: 1414 case FUTEX_OP_CMP_LE: 1415 case FUTEX_OP_CMP_GT: 1416 case FUTEX_OP_CMP_GE: 1417 break; 1418 default: 1419 return EINVAL; 1420 } 1421 1422 return 0; 1423 } 1424 1425 /* 1426 * futex_compute_op(oldval, val3) 1427 * 1428 * Apply a FUTEX_WAIT_OP operation to oldval. 1429 */ 1430 static int 1431 futex_compute_op(int oldval, int val3) 1432 { 1433 int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK); 1434 int oparg = __SHIFTOUT(val3, FUTEX_OP_OPARG_MASK); 1435 1436 if (op & FUTEX_OP_OPARG_SHIFT) { 1437 KASSERT(oparg >= 0); 1438 KASSERT(oparg < 32); 1439 oparg = 1u << oparg; 1440 op &= ~FUTEX_OP_OPARG_SHIFT; 1441 } 1442 1443 switch (op) { 1444 case FUTEX_OP_SET: 1445 return oparg; 1446 1447 case FUTEX_OP_ADD: 1448 /* 1449 * Avoid signed arithmetic overflow by doing 1450 * arithmetic unsigned and converting back to signed 1451 * at the end. 1452 */ 1453 return (int)((unsigned)oldval + (unsigned)oparg); 1454 1455 case FUTEX_OP_OR: 1456 return oldval | oparg; 1457 1458 case FUTEX_OP_ANDN: 1459 return oldval & ~oparg; 1460 1461 case FUTEX_OP_XOR: 1462 return oldval ^ oparg; 1463 1464 default: 1465 panic("invalid futex op"); 1466 } 1467 } 1468 1469 /* 1470 * futex_compute_cmp(oldval, val3) 1471 * 1472 * Apply a FUTEX_WAIT_OP comparison to oldval. 1473 */ 1474 static bool 1475 futex_compute_cmp(int oldval, int val3) 1476 { 1477 int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK); 1478 int cmparg = __SHIFTOUT(val3, FUTEX_OP_CMPARG_MASK); 1479 1480 switch (cmp) { 1481 case FUTEX_OP_CMP_EQ: 1482 return (oldval == cmparg); 1483 1484 case FUTEX_OP_CMP_NE: 1485 return (oldval != cmparg); 1486 1487 case FUTEX_OP_CMP_LT: 1488 return (oldval < cmparg); 1489 1490 case FUTEX_OP_CMP_LE: 1491 return (oldval <= cmparg); 1492 1493 case FUTEX_OP_CMP_GT: 1494 return (oldval > cmparg); 1495 1496 case FUTEX_OP_CMP_GE: 1497 return (oldval >= cmparg); 1498 1499 default: 1500 panic("invalid futex cmp operation"); 1501 } 1502 } 1503 1504 /* 1505 * futex_func_wake_op(uaddr, val, uaddr2, val2, val3, retval) 1506 * 1507 * Implement futex(FUTEX_WAKE_OP). 1508 */ 1509 static int 1510 futex_func_wake_op(bool shared, int *uaddr, int val, int *uaddr2, int val2, 1511 int val3, register_t *retval) 1512 { 1513 struct futex *f = NULL, *f2 = NULL; 1514 int oldval, newval, actual; 1515 unsigned nwoken = 0; 1516 int error; 1517 1518 /* Reject negative number of wakeups. */ 1519 if (val < 0 || val2 < 0) { 1520 error = EINVAL; 1521 goto out; 1522 } 1523 1524 /* Reject invalid operations before we start doing things. */ 1525 if ((error = futex_validate_op_cmp(val3)) != 0) 1526 goto out; 1527 1528 /* Look up the first futex, if any. */ 1529 error = futex_lookup(uaddr, shared, &f); 1530 if (error) 1531 goto out; 1532 1533 /* Look up the second futex, if any. 
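 * As with uaddr, a missing futex just means there are no waiters
 * to wake; the read-modify-write of *uaddr2 below is still
 * carried out even if f2 is NULL.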
*/ 1534 error = futex_lookup(uaddr2, shared, &f2); 1535 if (error) 1536 goto out; 1537 1538 /* 1539 * Under the queue locks: 1540 * 1541 * 1. Read/modify/write: *uaddr2 op= oparg. 1542 * 2. Unconditionally wake uaddr. 1543 * 3. Conditionally wake uaddr2, if it previously matched val2. 1544 */ 1545 futex_queue_lock2(f, f2); 1546 do { 1547 error = futex_load(uaddr2, &oldval); 1548 if (error) 1549 goto out_unlock; 1550 newval = futex_compute_op(oldval, val3); 1551 error = ucas_int(uaddr2, oldval, newval, &actual); 1552 if (error) 1553 goto out_unlock; 1554 } while (actual != oldval); 1555 nwoken = (f ? futex_wake(f, val, NULL, 0, FUTEX_BITSET_MATCH_ANY) : 0); 1556 if (f2 && futex_compute_cmp(oldval, val3)) 1557 nwoken += futex_wake(f2, val2, NULL, 0, 1558 FUTEX_BITSET_MATCH_ANY); 1559 1560 /* Success! */ 1561 error = 0; 1562 out_unlock: 1563 futex_queue_unlock2(f, f2); 1564 1565 out: 1566 /* Return the number of waiters woken. */ 1567 *retval = nwoken; 1568 1569 /* Release the futexes, if we got them. */ 1570 if (f2) 1571 futex_put(f2); 1572 if (f) 1573 futex_put(f); 1574 return error; 1575 } 1576 1577 /* 1578 * do_futex(uaddr, op, val, timeout, uaddr2, val2, val3) 1579 * 1580 * Implement the futex system call with all the parameters 1581 * parsed out. 1582 */ 1583 int 1584 do_futex(int *uaddr, int op, int val, const struct timespec *timeout, 1585 int *uaddr2, int val2, int val3, register_t *retval) 1586 { 1587 const bool shared = (op & FUTEX_PRIVATE_FLAG) ? false : true; 1588 const clockid_t clkid = (op & FUTEX_CLOCK_REALTIME) ? CLOCK_REALTIME 1589 : CLOCK_MONOTONIC; 1590 1591 op &= FUTEX_CMD_MASK; 1592 1593 switch (op) { 1594 case FUTEX_WAIT: 1595 return futex_func_wait(shared, uaddr, val, 1596 FUTEX_BITSET_MATCH_ANY, timeout, clkid, TIMER_RELTIME, 1597 retval); 1598 1599 case FUTEX_WAKE: 1600 val3 = FUTEX_BITSET_MATCH_ANY; 1601 /* FALLTHROUGH */ 1602 case FUTEX_WAKE_BITSET: 1603 return futex_func_wake(shared, uaddr, val, val3, retval); 1604 1605 case FUTEX_REQUEUE: 1606 case FUTEX_CMP_REQUEUE: 1607 return futex_func_requeue(shared, op, uaddr, val, uaddr2, 1608 val2, val3, retval); 1609 1610 case FUTEX_WAIT_BITSET: 1611 return futex_func_wait(shared, uaddr, val, val3, timeout, 1612 clkid, TIMER_ABSTIME, retval); 1613 1614 case FUTEX_WAKE_OP: 1615 return futex_func_wake_op(shared, uaddr, val, uaddr2, val2, 1616 val3, retval); 1617 1618 case FUTEX_FD: 1619 default: 1620 return ENOSYS; 1621 } 1622 } 1623 1624 /* 1625 * sys___futex(l, uap, retval) 1626 * 1627 * __futex(2) system call: generic futex operations. 1628 */ 1629 int 1630 sys___futex(struct lwp *l, const struct sys___futex_args *uap, 1631 register_t *retval) 1632 { 1633 /* { 1634 syscallarg(int *) uaddr; 1635 syscallarg(int) op; 1636 syscallarg(int) val; 1637 syscallarg(const struct timespec *) timeout; 1638 syscallarg(int *) uaddr2; 1639 syscallarg(int) val2; 1640 syscallarg(int) val3; 1641 } */ 1642 struct timespec ts, *tsp; 1643 int error; 1644 1645 /* 1646 * Copy in the timeout argument, if specified. 1647 */ 1648 if (SCARG(uap, timeout)) { 1649 error = copyin(SCARG(uap, timeout), &ts, sizeof(ts)); 1650 if (error) 1651 return error; 1652 tsp = &ts; 1653 } else { 1654 tsp = NULL; 1655 } 1656 1657 return do_futex(SCARG(uap, uaddr), SCARG(uap, op), SCARG(uap, val), 1658 tsp, SCARG(uap, uaddr2), SCARG(uap, val2), SCARG(uap, val3), 1659 retval); 1660 } 1661 1662 /* 1663 * sys___futex_set_robust_list(l, uap, retval) 1664 * 1665 * __futex_set_robust_list(2) system call for robust futexes. 
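 *
 * Only the userland pointer is recorded here; the list itself is
 * not read until the LWP exits, in futex_release_all_lwp().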
1666 */ 1667 int 1668 sys___futex_set_robust_list(struct lwp *l, 1669 const struct sys___futex_set_robust_list_args *uap, register_t *retval) 1670 { 1671 /* { 1672 syscallarg(void *) head; 1673 syscallarg(size_t) len; 1674 } */ 1675 void *head = SCARG(uap, head); 1676 1677 if (SCARG(uap, len) != _FUTEX_ROBUST_HEAD_SIZE) 1678 return EINVAL; 1679 if ((uintptr_t)head % sizeof(u_long)) 1680 return EINVAL; 1681 1682 l->l_robust_head = (uintptr_t)head; 1683 1684 return 0; 1685 } 1686 1687 /* 1688 * sys___futex_get_robust_list(l, uap, retval) 1689 * 1690 * __futex_get_robust_list(2) system call for robust futexes. 1691 */ 1692 int 1693 sys___futex_get_robust_list(struct lwp *l, 1694 const struct sys___futex_get_robust_list_args *uap, register_t *retval) 1695 { 1696 /* { 1697 syscallarg(lwpid_t) lwpid; 1698 syscallarg(void **) headp; 1699 syscallarg(size_t *) lenp; 1700 } */ 1701 void *head; 1702 const size_t len = _FUTEX_ROBUST_HEAD_SIZE; 1703 int error; 1704 1705 error = futex_robust_head_lookup(l, SCARG(uap, lwpid), &head); 1706 if (error) 1707 return error; 1708 1709 /* Copy out the head pointer and the head structure length. */ 1710 error = copyout(&head, SCARG(uap, headp), sizeof(head)); 1711 if (__predict_true(error == 0)) { 1712 error = copyout(&len, SCARG(uap, lenp), sizeof(len)); 1713 } 1714 1715 return error; 1716 } 1717 1718 /* 1719 * release_futex(uva, tid) 1720 * 1721 * Try to release the robust futex at uva in the current process 1722 * on lwp exit. If anything goes wrong, silently fail. It is the 1723 * userland program's obligation to arrange correct behaviour. 1724 */ 1725 static void 1726 release_futex(uintptr_t const uptr, lwpid_t const tid, bool const is_pi, 1727 bool const is_pending) 1728 { 1729 int *uaddr; 1730 struct futex *f; 1731 int oldval, newval, actual; 1732 int error; 1733 1734 /* If it's misaligned, tough. */ 1735 if (__predict_false(uptr & 3)) 1736 return; 1737 uaddr = (int *)uptr; 1738 1739 error = futex_load(uaddr, &oldval); 1740 if (__predict_false(error)) 1741 return; 1742 1743 /* 1744 * There are two race conditions we need to handle here: 1745 * 1746 * 1. User space cleared the futex word but died before 1747 * being able to issue the wakeup. No wakeups will 1748 * ever be issued, oops! 1749 * 1750 * 2. Awakened waiter died before being able to acquire 1751 * the futex in user space. Any other waiters are 1752 * now stuck, oops! 1753 * 1754 * In both of these cases, the futex word will be 0 (because 1755 * it's updated before the wake is issued). The best we can 1756 * do is detect this situation if it's the pending futex and 1757 * issue a wake without modifying the futex word. 1758 * 1759 * XXX eventual PI handling? 1760 */ 1761 if (__predict_false(is_pending && (oldval & ~FUTEX_WAITERS) == 0)) { 1762 register_t retval; 1763 (void) futex_func_wake(/*shared*/true, uaddr, 1, 1764 FUTEX_BITSET_MATCH_ANY, &retval); 1765 return; 1766 } 1767 1768 /* Optimistically test whether we need to do anything at all. */ 1769 if ((oldval & FUTEX_TID_MASK) != tid) 1770 return; 1771 1772 /* 1773 * We need to handle the case where this thread owned the futex, 1774 * but it was uncontended. In this case, there won't be any 1775 * kernel state to look up. All we can do is mark the futex 1776 * as a zombie to be mopped up the next time another thread 1777 * attempts to acquire it. 1778 * 1779 * N.B. It's important to ensure to set FUTEX_OWNER_DIED in 1780 * this loop, even if waiters appear while we're are doing 1781 * so. 
This is because FUTEX_WAITERS is set by user space
1782 * before calling __futex() to wait, and the futex needs
1783 * to be marked as a zombie when the new waiter gets into
1784 * the kernel.
1785 */
1786 if ((oldval & FUTEX_WAITERS) == 0) {
1787 do {
1788 error = futex_load(uaddr, &oldval);
1789 if (error)
1790 return;
1791 if ((oldval & FUTEX_TID_MASK) != tid)
1792 return;
1793 newval = oldval | FUTEX_OWNER_DIED;
1794 error = ucas_int(uaddr, oldval, newval, &actual);
1795 if (error)
1796 return;
1797 } while (actual != oldval);
1798
1799 /*
1800 * If there is still no indication of waiters, then there is
1801 * no more work for us to do.
1802 */
1803 if ((oldval & FUTEX_WAITERS) == 0)
1804 return;
1805 }
1806
1807 /*
1808 * Look for a shared futex since we have no positive indication
1809 * it is private. If we can't, tough.
1810 */
1811 error = futex_lookup(uaddr, /*shared*/true, &f);
1812 if (error)
1813 return;
1814
1815 /*
1816 * If there's no kernel state for this futex, there's nothing to
1817 * release.
1818 */
1819 if (f == NULL)
1820 return;
1821
1822 /* Work under the futex queue lock. */
1823 futex_queue_lock(f);
1824
1825 /*
1826 * Fetch the word: if the tid doesn't match ours, skip;
1827 * otherwise, set the owner-died bit, atomically.
1828 */
1829 do {
1830 error = futex_load(uaddr, &oldval);
1831 if (error)
1832 goto out;
1833 if ((oldval & FUTEX_TID_MASK) != tid)
1834 goto out;
1835 newval = oldval | FUTEX_OWNER_DIED;
1836 error = ucas_int(uaddr, oldval, newval, &actual);
1837 if (error)
1838 goto out;
1839 } while (actual != oldval);
1840
1841 /*
1842 * If there may be waiters, try to wake one. If anything goes
1843 * wrong, tough.
1844 *
1845 * XXX eventual PI handling?
1846 */
1847 if (oldval & FUTEX_WAITERS)
1848 (void)futex_wake(f, 1, NULL, 0, FUTEX_BITSET_MATCH_ANY);
1849
1850 /* Unlock the queue and release the futex. */
1851 out: futex_queue_unlock(f);
1852 futex_put(f);
1853 }
1854
1855 /*
1856 * futex_robust_head_lookup(l, lwpid)
1857 *
1858 * Helper function to look up a robust head by LWP ID.
1859 */
1860 int
1861 futex_robust_head_lookup(struct lwp *l, lwpid_t lwpid, void **headp)
1862 {
1863 struct proc *p = l->l_proc;
1864
1865 /* Find the other lwp, if requested; otherwise use our robust head. */
1866 if (lwpid) {
1867 mutex_enter(p->p_lock);
1868 l = lwp_find(p, lwpid);
1869 if (l == NULL) {
1870 mutex_exit(p->p_lock);
1871 return ESRCH;
1872 }
1873 *headp = (void *)l->l_robust_head;
1874 mutex_exit(p->p_lock);
1875 } else {
1876 *headp = (void *)l->l_robust_head;
1877 }
1878 return 0;
1879 }
1880
1881 /*
1882 * futex_fetch_robust_head(uaddr)
1883 *
1884 * Helper routine to fetch the futex robust list head that
1885 * handles 32-bit binaries running on 64-bit kernels.
1886 */
1887 static int
1888 futex_fetch_robust_head(uintptr_t uaddr, u_long *rhead)
1889 {
1890 #ifdef _LP64
1891 if (curproc->p_flag & PK_32) {
1892 uint32_t rhead32[_FUTEX_ROBUST_HEAD_NWORDS];
1893 int error;
1894
1895 error = copyin((void *)uaddr, rhead32, sizeof(rhead32));
1896 if (__predict_true(error == 0)) {
1897 for (int i = 0; i < _FUTEX_ROBUST_HEAD_NWORDS; i++) {
1898 if (i == _FUTEX_ROBUST_HEAD_OFFSET) {
1899 /*
1900 * Make sure the offset is sign-
1901 * extended.
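 * (In a 32-bit process the offset is a signed 32-bit quantity; a
 * plain assignment would zero-extend it on a 64-bit kernel and
 * turn a negative offset into a bogus huge one.)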
1902 */ 1903 rhead[i] = (int32_t)rhead32[i]; 1904 } else { 1905 rhead[i] = rhead32[i]; 1906 } 1907 } 1908 } 1909 return error; 1910 } 1911 #endif /* _L64 */ 1912 1913 return copyin((void *)uaddr, rhead, 1914 sizeof(*rhead) * _FUTEX_ROBUST_HEAD_NWORDS); 1915 } 1916 1917 /* 1918 * futex_decode_robust_word(word) 1919 * 1920 * Decode a robust futex list word into the entry and entry 1921 * properties. 1922 */ 1923 static inline void 1924 futex_decode_robust_word(uintptr_t const word, uintptr_t * const entry, 1925 bool * const is_pi) 1926 { 1927 *is_pi = (word & _FUTEX_ROBUST_ENTRY_PI) ? true : false; 1928 *entry = word & ~_FUTEX_ROBUST_ENTRY_PI; 1929 } 1930 1931 /* 1932 * futex_fetch_robust_entry(uaddr) 1933 * 1934 * Helper routine to fetch and decode a robust futex entry 1935 * that handles 32-bit binaries running on 64-bit kernels. 1936 */ 1937 static int 1938 futex_fetch_robust_entry(uintptr_t const uaddr, uintptr_t * const valp, 1939 bool * const is_pi) 1940 { 1941 uintptr_t val = 0; 1942 int error = 0; 1943 1944 #ifdef _LP64 1945 if (curproc->p_flag & PK_32) { 1946 uint32_t val32; 1947 1948 error = ufetch_32((uint32_t *)uaddr, &val32); 1949 if (__predict_true(error == 0)) 1950 val = val32; 1951 } else 1952 #endif /* _LP64 */ 1953 error = ufetch_long((u_long *)uaddr, (u_long *)&val); 1954 if (__predict_false(error)) 1955 return error; 1956 1957 futex_decode_robust_word(val, valp, is_pi); 1958 return 0; 1959 } 1960 1961 /* 1962 * futex_release_all_lwp(l, tid) 1963 * 1964 * Release all l's robust futexes. If anything looks funny in 1965 * the process, give up -- it's userland's responsibility to dot 1966 * the i's and cross the t's. 1967 */ 1968 void 1969 futex_release_all_lwp(struct lwp * const l, lwpid_t const tid) 1970 { 1971 u_long rhead[_FUTEX_ROBUST_HEAD_NWORDS]; 1972 int limit = 1000000; 1973 int error; 1974 1975 /* If there's no robust list there's nothing to do. */ 1976 if (l->l_robust_head == 0) 1977 return; 1978 1979 /* Read the final snapshot of the robust list head. */ 1980 error = futex_fetch_robust_head(l->l_robust_head, rhead); 1981 if (error) { 1982 printf("WARNING: pid %jd (%s) lwp %jd tid %jd:" 1983 " unmapped robust futex list head\n", 1984 (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm, 1985 (uintmax_t)l->l_lid, (uintmax_t)tid); 1986 return; 1987 } 1988 1989 const long offset = (long)rhead[_FUTEX_ROBUST_HEAD_OFFSET]; 1990 1991 uintptr_t next, pending; 1992 bool is_pi, pending_is_pi; 1993 1994 futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_LIST], 1995 &next, &is_pi); 1996 futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_PENDING], 1997 &pending, &pending_is_pi); 1998 1999 /* 2000 * Walk down the list of locked futexes and release them, up 2001 * to one million of them before we give up. 2002 */ 2003 2004 while (next != l->l_robust_head && limit-- > 0) { 2005 /* pending handled below. */ 2006 if (next != pending) 2007 release_futex(next + offset, tid, is_pi, false); 2008 error = futex_fetch_robust_entry(next, &next, &is_pi); 2009 if (error) 2010 break; 2011 preempt_point(); 2012 } 2013 if (limit <= 0) { 2014 printf("WARNING: pid %jd (%s) lwp %jd tid %jd:" 2015 " exhausted robust futex limit\n", 2016 (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm, 2017 (uintmax_t)l->l_lid, (uintmax_t)tid); 2018 } 2019 2020 /* If there's a pending futex, it may need to be released too. */ 2021 if (pending != 0) { 2022 release_futex(pending + offset, tid, pending_is_pi, true); 2023 } 2024 } 2025
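
/*
 * For reference, the robust-list walk above assumes the layout
 * implied by the _FUTEX_ROBUST_HEAD_* constants in <sys/futex.h>:
 * the head holds a pointer to the first list entry
 * (_FUTEX_ROBUST_HEAD_LIST), a signed offset from each entry to
 * its futex word (_FUTEX_ROBUST_HEAD_OFFSET), and a "pending"
 * entry (_FUTEX_ROBUST_HEAD_PENDING) that is handled separately
 * because it may be mid-acquire or mid-release at exit time (see
 * release_futex() above).  The list is circular: the walk stops
 * when an entry points back at the head itself.
 */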