/* $NetBSD: subr_psref.c,v 1.15 2021/07/21 06:35:45 skrll Exp $ */

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 * Passive references are references to objects that guarantee the
 * object will not be destroyed until the reference is released.
 *
 * Passive references require no interprocessor synchronization to
 * acquire or release.  However, destroying the target of passive
 * references requires expensive interprocessor synchronization --
 * xcalls to determine on which CPUs the object is still in use.
 *
 * Passive references may be held only on a single CPU and by a
 * single LWP.  They require the caller to allocate a little stack
 * space, a struct psref object.  Sleeping while a passive
 * reference is held is allowed, provided that the owner's LWP is
 * bound to a CPU -- e.g., the owner is a softint or a bound
 * kthread.  However, sleeping should be kept to a short duration,
 * e.g. sleeping on an adaptive lock.
 *
 * Passive references serve as an intermediate stage between
 * reference counting and passive serialization (pserialize(9)):
 *
 * - If you need references to transfer from CPU to CPU or LWP to
 *   LWP, or if you need long-term references, you must use
 *   reference counting, e.g. with atomic operations or locks,
 *   which incurs interprocessor synchronization for every use --
 *   cheaper than an xcall, but not scalable.
 *
 * - If all users *guarantee* that they will not sleep, then it is
 *   not necessary to use passive references: you may as well just
 *   use the even cheaper pserialize(9), because you have
 *   satisfied the requirements of a pserialize read section.
 */
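
/*
 * Example (an illustrative sketch, not part of this file's API): a
 * typical reader looks an object up under pserialize(9), takes a
 * passive reference so it may sleep briefly, and releases it when
 * done.  The names "struct frob", "frobs", "frob_psref_target", and
 * "frob_class" are hypothetical.
 *
 *	struct frob *frob;
 *	struct psref psref;
 *	int bound, s;
 *
 *	bound = curlwp_bind();		(stay on this CPU while held)
 *	s = pserialize_read_enter();
 *	PSLIST_READER_FOREACH(frob, &frobs, struct frob, frob_entry) {
 *		if (frob->frob_key == key) {
 *			psref_acquire(&psref, &frob->frob_psref_target,
 *			    frob_class);
 *			break;
 *		}
 *	}
 *	pserialize_read_exit(s);
 *
 *	if (frob != NULL) {
 *		(use frob; a short sleep, e.g. on an adaptive lock, is OK)
 *		psref_release(&psref, &frob->frob_psref_target, frob_class);
 *	}
 *	curlwp_bindx(bound);
 */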

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.15 2021/07/21 06:35:45 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

SLIST_HEAD(psref_head, psref);

static bool _psref_held(const struct psref_target *, struct psref_class *,
    bool);

/*
 * struct psref_class
 *
 * Private global state for a class of passive reference targets.
 * Opaque to callers.
 */
struct psref_class {
        kmutex_t                prc_lock;
        kcondvar_t              prc_cv;
        struct percpu           *prc_percpu; /* struct psref_cpu */
        ipl_cookie_t            prc_iplcookie;
        unsigned int            prc_xc_flags;
};

/*
 * struct psref_cpu
 *
 * Private per-CPU state for a class of passive reference targets.
 * Not exposed by the API.
 */
struct psref_cpu {
        struct psref_head       pcpu_head;
};

/*
 * Data structures and functions for debugging.
 */
#ifndef PSREF_DEBUG_NITEMS
#define PSREF_DEBUG_NITEMS 16
#endif

struct psref_debug_item {
        void                    *prdi_caller;
        struct psref            *prdi_psref;
};

struct psref_debug {
        int                     prd_refs_peek;
        struct psref_debug_item prd_items[PSREF_DEBUG_NITEMS];
};

#ifdef PSREF_DEBUG
static void psref_debug_acquire(struct psref *);
static void psref_debug_release(struct psref *);

static void psref_debug_lwp_free(void *);

static specificdata_key_t psref_debug_lwp_key;
#endif

/*
 * psref_init()
 */
void
psref_init(void)
{

#ifdef PSREF_DEBUG
        lwp_specific_key_create(&psref_debug_lwp_key, psref_debug_lwp_free);
#endif
}

/*
 * psref_class_create(name, ipl)
 *
 * Create a new passive reference class, with the given wchan name
 * and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
        struct psref_class *class;

        ASSERT_SLEEPABLE();

        class = kmem_alloc(sizeof(*class), KM_SLEEP);
        class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
        mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
        cv_init(&class->prc_cv, name);
        class->prc_iplcookie = makeiplcookie(ipl);
        class->prc_xc_flags = XC_HIGHPRI_IPL(ipl);

        return class;
}

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
        const struct psref_cpu *pcpu = p;
        bool *retp = cookie;

        if (!SLIST_EMPTY(&pcpu->pcpu_head))
                *retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
        bool ret = true;

        percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

        return ret;
}
#endif /* DIAGNOSTIC */
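
/*
 * Example (sketch only; "frob_class", "struct frob", "frobs", and the
 * writer lock are hypothetical): a subsystem typically creates one
 * class at boot time, then initializes and publishes a target as each
 * object is created, issuing membar_producer before exposing it, as
 * psref_target_init below requires.
 *
 *	frob_class = psref_class_create("frob", IPL_SOFTNET);
 *
 *	(later, creating and publishing an object, under the writer lock)
 *	frob = kmem_zalloc(sizeof(*frob), KM_SLEEP);
 *	psref_target_init(&frob->frob_psref_target, frob_class);
 *	PSLIST_ENTRY_INIT(frob, frob_entry);
 *	membar_producer();
 *	PSLIST_WRITER_INSERT_HEAD(&frobs, frob, frob_entry);
 */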

/*
 * psref_class_destroy(class)
 *
 * Destroy a passive reference class and free memory associated
 * with it.  All targets in this class must have been drained and
 * destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

        KASSERT(psref_class_drained_p(class));

        cv_destroy(&class->prc_cv);
        mutex_destroy(&class->prc_lock);
        percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
        kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 * Initialize a passive reference target in the specified class.
 * The caller is responsible for issuing a membar_producer after
 * psref_target_init and before exposing a pointer to the target
 * to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

        target->prt_class = class;
        target->prt_draining = false;
}

#ifdef DEBUG
static bool
psref_exist(struct psref_cpu *pcpu, struct psref *psref)
{
        struct psref *_psref;

        SLIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
                if (_psref == psref)
                        return true;
        }
        return false;
}

static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
        bool found = false;

        found = psref_exist(pcpu, psref);
        if (found) {
                panic("The psref is already in the list (acquiring twice?): "
                    "psref=%p target=%p", psref, target);
        }
}

static void
psref_check_existence(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
        bool found = false;

        found = psref_exist(pcpu, psref);
        if (!found) {
                panic("The psref isn't in the list (releasing unused psref?): "
                    "psref=%p target=%p", psref, target);
        }
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 * Acquire a passive reference to the specified target, which must
 * be in the specified class.
 *
 * The caller must guarantee that the target will not be destroyed
 * before psref_acquire returns.
 *
 * The caller must additionally guarantee that it will not switch
 * CPUs before releasing the passive reference, either by
 * disabling kpreemption and avoiding sleeps, or by being in a
 * softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
        struct psref_cpu *pcpu;
        int s;

        KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
                ISSET(curlwp->l_pflag, LP_BOUND)),
            "passive references are CPU-local,"
            " but preemption is enabled and the caller is not"
            " in a softint or CPU-bound LWP");
        KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
            target);
        KASSERTMSG((target->prt_class == class),
            "mismatched psref target class: %p (ref) != %p (expected)",
            target->prt_class, class);

        /* Block interrupts and acquire the current CPU's reference list. */
        s = splraiseipl(class->prc_iplcookie);
        pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
        /* Sanity-check that the same psref is not being acquired twice. */
        psref_check_duplication(pcpu, psref, target);
#endif

        /* Record our reference. */
        SLIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
        psref->psref_target = target;
        psref->psref_lwp = curlwp;
        psref->psref_cpu = curcpu();

        /* Release the CPU list and restore interrupts. */
        percpu_putref(class->prc_percpu);
        splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
        curlwp->l_psrefs++;
#endif
#ifdef PSREF_DEBUG
        psref_debug_acquire(psref);
#endif
}

/*
 * psref_release(psref, target, class)
 *
 * Release a passive reference to the specified target, which must
 * be in the specified class.
 *
 * The caller must not have switched CPUs or LWPs since acquiring
 * the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
        struct psref_cpu *pcpu;
        int s;

        KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
                ISSET(curlwp->l_pflag, LP_BOUND)),
            "passive references are CPU-local,"
            " but preemption is enabled and the caller is not"
            " in a softint or CPU-bound LWP");
        KASSERTMSG((target->prt_class == class),
            "mismatched psref target class: %p (ref) != %p (expected)",
            target->prt_class, class);

        /* Make sure the psref looks sensible. */
        KASSERTMSG((psref->psref_target == target),
            "passive reference target mismatch: %p (ref) != %p (expected)",
            psref->psref_target, target);
        KASSERTMSG((psref->psref_lwp == curlwp),
            "passive reference transferred from lwp %p to lwp %p",
            psref->psref_lwp, curlwp);
        KASSERTMSG((psref->psref_cpu == curcpu()),
            "passive reference transferred from CPU %u to CPU %u",
            cpu_index(psref->psref_cpu), cpu_index(curcpu()));

        /*
         * Block interrupts and remove the psref from the current CPU's
         * list.  The caller guarantees that we are bound to a CPU (as
         * does blocking interrupts), so the reference must be on this
         * CPU's list.
         */
        s = splraiseipl(class->prc_iplcookie);
        pcpu = percpu_getref(class->prc_percpu);
#ifdef DEBUG
        /* Sanity-check that this psref was in fact acquired earlier. */
        psref_check_existence(pcpu, psref, target);
#endif
        SLIST_REMOVE(&pcpu->pcpu_head, psref, psref, psref_entry);
        percpu_putref(class->prc_percpu);
        splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
        KASSERT(curlwp->l_psrefs > 0);
        curlwp->l_psrefs--;
#endif
#ifdef PSREF_DEBUG
        psref_debug_release(psref);
#endif

        /* If someone is waiting for users to drain, notify 'em. */
        if (__predict_false(target->prt_draining))
                cv_broadcast(&class->prc_cv);
}
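
/*
 * Example (sketch; the "frob" names are hypothetical): a routine whose
 * contract requires the caller to hold a passive reference acquired as
 * above can check that contract with psref_held, defined near the end
 * of this file and intended for use in assertions only:
 *
 *	void
 *	frob_poke(struct frob *frob)
 *	{
 *
 *		KASSERT(psref_held(&frob->frob_psref_target, frob_class));
 *		frob->frob_pokes++;
 *	}
 */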

/*
 * psref_copy(pto, pfrom, class)
 *
 * Copy a passive reference from pfrom, which must be in the
 * specified class, to pto.  Both pfrom and pto must later be
 * released with psref_release.
 *
 * The caller must not have switched CPUs or LWPs since acquiring
 * pfrom, and must not switch CPUs or LWPs before releasing both
 * pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
        struct psref_cpu *pcpu;
        int s;

        KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
                ISSET(curlwp->l_pflag, LP_BOUND)),
            "passive references are CPU-local,"
            " but preemption is enabled and the caller is not"
            " in a softint or CPU-bound LWP");
        KASSERTMSG((pto != pfrom),
            "can't copy passive reference to itself: %p",
            pto);

        /* Make sure the pfrom reference looks sensible. */
        KASSERTMSG((pfrom->psref_lwp == curlwp),
            "passive reference transferred from lwp %p to lwp %p",
            pfrom->psref_lwp, curlwp);
        KASSERTMSG((pfrom->psref_cpu == curcpu()),
            "passive reference transferred from CPU %u to CPU %u",
            cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
        KASSERTMSG((pfrom->psref_target->prt_class == class),
            "mismatched psref target class: %p (ref) != %p (expected)",
            pfrom->psref_target->prt_class, class);

        /* Block interrupts and acquire the current CPU's reference list. */
        s = splraiseipl(class->prc_iplcookie);
        pcpu = percpu_getref(class->prc_percpu);

        /* Record the new reference. */
        SLIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
        pto->psref_target = pfrom->psref_target;
        pto->psref_lwp = curlwp;
        pto->psref_cpu = curcpu();

        /* Release the CPU list and restore interrupts. */
        percpu_putref(class->prc_percpu);
        splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
        curlwp->l_psrefs++;
#endif
}

/*
 * struct psreffed
 *
 * Global state for draining a psref target.
 */
struct psreffed {
        struct psref_class      *class;
        struct psref_target     *target;
        bool                    ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
        struct psreffed *P = cookie0;

        /*
         * If we hold a psref to the target, then answer true.
         *
         * This is the only dynamic decision that may be made with
         * psref_held.
         *
         * No need to lock anything here: every write transitions from
         * false to true, so there can be no conflicting writes.  No
         * need for a memory barrier here because P->ret is read only
         * after xc_wait, which has already issued any necessary memory
         * barriers.
         */
        if (_psref_held(P->target, P->class, true))
                P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
        struct psreffed P = {
                .class = class,
                .target = target,
                .ret = false,
        };

        if (__predict_true(mp_online)) {
                /*
                 * Ask all CPUs to say whether they hold a psref to the
                 * target.
                 */
                xc_wait(xc_broadcast(class->prc_xc_flags, &psreffed_p_xc, &P,
                    NULL));
        } else
                psreffed_p_xc(&P, NULL);

        return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 * Destroy a passive reference target.  Waits for all existing
 * references to drain.  Caller must guarantee no new references
 * will be acquired once it calls psref_target_destroy, e.g. by
 * removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

        ASSERT_SLEEPABLE();

        KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
            target);
        KASSERTMSG((target->prt_class == class),
            "mismatched psref target class: %p (ref) != %p (expected)",
            target->prt_class, class);

        /* Request psref_release to notify us when done. */
        target->prt_draining = true;

        /* Wait until there are no more references on any CPU. */
        while (psreffed_p(target, class)) {
                /*
                 * This enter/wait/exit business looks wrong, but it is
                 * both necessary, because psreffed_p performs an xcall
                 * and hence cannot run while a mutex is locked, and
                 * OK, because the wait is timed -- explicit wakeups
                 * are only an optimization.
                 */
537 */ 538 mutex_enter(&class->prc_lock); 539 (void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1); 540 mutex_exit(&class->prc_lock); 541 } 542 543 /* No more references. Cause subsequent psref_acquire to kassert. */ 544 target->prt_class = NULL; 545 } 546 547 static bool 548 _psref_held(const struct psref_target *target, struct psref_class *class, 549 bool lwp_mismatch_ok) 550 { 551 const struct psref_cpu *pcpu; 552 const struct psref *psref; 553 int s; 554 bool held = false; 555 556 KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() || 557 ISSET(curlwp->l_pflag, LP_BOUND)), 558 "passive references are CPU-local," 559 " but preemption is enabled and the caller is not" 560 " in a softint or CPU-bound LWP"); 561 KASSERTMSG((target->prt_class == class), 562 "mismatched psref target class: %p (ref) != %p (expected)", 563 target->prt_class, class); 564 565 /* Block interrupts and acquire the current CPU's reference list. */ 566 s = splraiseipl(class->prc_iplcookie); 567 pcpu = percpu_getref(class->prc_percpu); 568 569 /* Search through all the references on this CPU. */ 570 SLIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) { 571 /* Sanity-check the reference's CPU. */ 572 KASSERTMSG((psref->psref_cpu == curcpu()), 573 "passive reference transferred from CPU %u to CPU %u", 574 cpu_index(psref->psref_cpu), cpu_index(curcpu())); 575 576 /* If it doesn't match, skip it and move on. */ 577 if (psref->psref_target != target) 578 continue; 579 580 /* 581 * Sanity-check the reference's LWP if we are asserting 582 * via psref_held that this LWP holds it, but not if we 583 * are testing in psref_target_destroy whether any LWP 584 * still holds it. 585 */ 586 KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp), 587 "passive reference transferred from lwp %p to lwp %p", 588 psref->psref_lwp, curlwp); 589 590 /* Stop here and report that we found it. */ 591 held = true; 592 break; 593 } 594 595 /* Release the CPU list and restore interrupts. */ 596 percpu_putref(class->prc_percpu); 597 splx(s); 598 599 return held; 600 } 601 602 /* 603 * psref_held(target, class) 604 * 605 * True if the current CPU holds a passive reference to target, 606 * false otherwise. May be used only inside assertions. 
607 */ 608 bool 609 psref_held(const struct psref_target *target, struct psref_class *class) 610 { 611 612 return _psref_held(target, class, false); 613 } 614 615 #ifdef PSREF_DEBUG 616 void 617 psref_debug_init_lwp(struct lwp *l) 618 { 619 struct psref_debug *prd; 620 621 prd = kmem_zalloc(sizeof(*prd), KM_SLEEP); 622 lwp_setspecific_by_lwp(l, psref_debug_lwp_key, prd); 623 } 624 625 static void 626 psref_debug_lwp_free(void *arg) 627 { 628 struct psref_debug *prd = arg; 629 630 kmem_free(prd, sizeof(*prd)); 631 } 632 633 static void 634 psref_debug_acquire(struct psref *psref) 635 { 636 struct psref_debug *prd; 637 struct lwp *l = curlwp; 638 int s, i; 639 640 prd = lwp_getspecific(psref_debug_lwp_key); 641 if (__predict_false(prd == NULL)) { 642 psref->psref_debug = NULL; 643 return; 644 } 645 646 s = splserial(); 647 if (l->l_psrefs > prd->prd_refs_peek) { 648 prd->prd_refs_peek = l->l_psrefs; 649 if (__predict_false(prd->prd_refs_peek > PSREF_DEBUG_NITEMS)) 650 panic("exceeded PSREF_DEBUG_NITEMS"); 651 } 652 for (i = 0; i < prd->prd_refs_peek; i++) { 653 struct psref_debug_item *prdi = &prd->prd_items[i]; 654 if (prdi->prdi_psref != NULL) 655 continue; 656 prdi->prdi_caller = psref->psref_debug; 657 prdi->prdi_psref = psref; 658 psref->psref_debug = prdi; 659 break; 660 } 661 if (__predict_false(i == prd->prd_refs_peek)) 662 panic("out of range: %d", i); 663 splx(s); 664 } 665 666 static void 667 psref_debug_release(struct psref *psref) 668 { 669 int s; 670 671 s = splserial(); 672 if (__predict_true(psref->psref_debug != NULL)) { 673 struct psref_debug_item *prdi = psref->psref_debug; 674 prdi->prdi_psref = NULL; 675 } 676 splx(s); 677 } 678 679 void 680 psref_debug_barrier(void) 681 { 682 struct psref_debug *prd; 683 struct lwp *l = curlwp; 684 int s, i; 685 686 prd = lwp_getspecific(psref_debug_lwp_key); 687 if (__predict_false(prd == NULL)) 688 return; 689 690 s = splserial(); 691 for (i = 0; i < prd->prd_refs_peek; i++) { 692 struct psref_debug_item *prdi = &prd->prd_items[i]; 693 if (__predict_true(prdi->prdi_psref == NULL)) 694 continue; 695 panic("psref leaked: lwp(%p) acquired at %p", l, prdi->prdi_caller); 696 } 697 prd->prd_refs_peek = 0; /* Reset the counter */ 698 splx(s); 699 } 700 #endif /* PSREF_DEBUG */ 701