/*	$NetBSD: subr_psref.c,v 1.11 2018/02/01 03:17:00 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
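 *
 *	The following sketch shows the intended usage pattern from the
 *	reader's side.  The "frob" object, its f_psref target member,
 *	its psref class, and the pserialize-protected lookup are
 *	hypothetical names used for illustration only:
 *
 *		struct psref psref;
 *		int s;
 *
 *		s = pserialize_read_enter();
 *		f = ...look up a frob in a pserialize-protected list...;
 *		psref_acquire(&psref, &f->f_psref, frob_psref_class);
 *		pserialize_read_exit(s);
 *
 *		...use f; a brief sleep is OK as long as the LWP stays
 *		bound to its CPU (softint or bound kthread)...
 *
 *		psref_release(&psref, &f->f_psref, frob_psref_class);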
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.11 2018/02/01 03:17:00 ozaki-r Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

SLIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
	unsigned int		prc_xc_flags;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);
	class->prc_xc_flags = XC_HIGHPRI_IPL(ipl);

	return class;
}

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!SLIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
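 *
 *	A minimal sketch of that publication sequence (the "frob" names
 *	and its list are hypothetical, not part of this file):
 *
 *		psref_target_init(&f->f_psref, frob_psref_class);
 *		membar_producer();
 *		...now publish a pointer to f, e.g. by inserting it
 *		into a pserialize-protected list...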
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}

#ifdef DEBUG
static bool
psref_exist(struct psref_cpu *pcpu, struct psref *psref)
{
	struct psref *_psref;

	SLIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref)
			return true;
	}
	return false;
}

static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;

	found = psref_exist(pcpu, psref);
	if (found) {
		panic("The psref is already in the list (acquiring twice?): "
		    "psref=%p target=%p", psref, target);
	}
}

static void
psref_check_existence(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;

	found = psref_exist(pcpu, psref);
	if (!found) {
		panic("The psref isn't in the list (releasing unused psref?): "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity-check that this psref has not already been acquired. */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference. */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
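 *
 *	A sketch of honouring that constraint from ordinary (non-softint,
 *	unbound) thread context, using the hypothetical "frob" names from
 *	the sketches above:
 *
 *		bound = curlwp_bind();
 *		psref_acquire(&psref, &f->f_psref, frob_psref_class);
 *		...
 *		psref_release(&psref, &f->f_psref, frob_psref_class);
 *		curlwp_bindx(bound);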
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible. */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  The caller guarantees that we are bound to a CPU, as
	 * does blocking interrupts.
	 */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);
#ifdef DEBUG
	/* Sanity-check that the psref was in fact acquired earlier. */
	psref_check_existence(pcpu, psref, target);
#endif
	SLIST_REMOVE(&pcpu->pcpu_head, psref, psref, psref_entry);
	percpu_putref(class->prc_percpu);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em. */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible. */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference. */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	if (__predict_true(mp_online)) {
		/*
		 * Ask all CPUs to say whether they hold a psref to the
		 * target.
		 */
		xc_wait(xc_broadcast(class->prc_xc_flags, &psreffed_p_xc, &P,
		    NULL));
	} else
		psreffed_p_xc(&P, NULL);

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done. */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU. */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert. */
	target->prt_class = NULL;
}

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU. */
	SLIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU. */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on. */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it. */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
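
/*
 * Teardown sketch (illustration only, not part of the implementation
 * above): a detach path for the hypothetical "frob" objects used in the
 * earlier sketches would typically unpublish the object, wait for
 * pserialize readers, and only then drain the passive references:
 *
 *	mutex_enter(&frob_list_lock);
 *	...remove f from the pserialize-protected list...
 *	pserialize_perform(frob_psz);
 *	mutex_exit(&frob_list_lock);
 *
 *	psref_target_destroy(&f->f_psref, frob_psref_class);
 *	...free f...
 */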