/*	$NetBSD: subr_psref.c,v 1.7 2017/06/01 02:45:13 chs Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
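 *
 *	As a purely illustrative sketch of the intended pattern (the
 *	frobber_* names are hypothetical, not part of this file): a
 *	pserialize(9) read section is used to find the object, a
 *	passive reference is acquired before leaving the read section,
 *	and the reference is released on the same CPU and LWP once the
 *	possibly-sleeping work is done:
 *
 *		struct frobber *f;
 *		struct psref psref;
 *		int bound, s;
 *
 *		bound = curlwp_bind();
 *		s = pserialize_read_enter();
 *		f = frobber_lookup(key);
 *		if (f != NULL)
 *			psref_acquire(&psref, &f->f_target, frobber_psref_class);
 *		pserialize_read_exit(s);
 *		if (f != NULL) {
 *			frobber_frob(f);	(may sleep briefly)
 *			psref_release(&psref, &f->f_target, frobber_psref_class);
 *		}
 *		curlwp_bindx(bound);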
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.7 2017/06/01 02:45:13 chs Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

LIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);

	return class;
}

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!LIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}
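
#ifdef PSREF_EXAMPLE	/* illustrative sketch only; this macro is never defined */
/*
 * Example, not part of this file: a hypothetical "frobber" subsystem
 * would typically create one psref class when it starts up and
 * destroy it when it shuts down, after every target in the class has
 * been destroyed with psref_target_destroy.  The frobber_* names are
 * invented for the example; IPL_SOFTNET is merely a plausible ipl.
 */
static struct psref_class *frobber_psref_class;

static void
frobber_subsystem_init(void)
{

	frobber_psref_class = psref_class_create("frobber", IPL_SOFTNET);
}

static void
frobber_subsystem_fini(void)
{

	/* All frobber targets must have been destroyed by now. */
	psref_class_destroy(frobber_psref_class);
	frobber_psref_class = NULL;
}
#endif	/* PSREF_EXAMPLE */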

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
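
#ifdef PSREF_EXAMPLE	/* illustrative sketch only; this macro is never defined */
/*
 * Example, not part of this file: initializing a target and then
 * publishing it to readers.  As noted above, the membar_producer
 * (from <sys/atomic.h>) is the caller's responsibility.  struct
 * frobber, frobber_psref_class and frobber_publish_to_readers are
 * hypothetical; the last stands in for inserting the object into
 * whatever pserialize(9)-protected structure lookups traverse.
 */
struct frobber {
	struct psref_target	f_target;
	/* ... other members ... */
};

static void
frobber_attach(struct frobber *f)
{

	psref_target_init(&f->f_target, frobber_psref_class);

	/* Make the initialization visible before any pointer to f is. */
	membar_producer();

	frobber_publish_to_readers(f);
}
#endif	/* PSREF_EXAMPLE */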

#ifdef DEBUG
static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;
	struct psref *_psref;

	LIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref &&
		    _psref->psref_target == target) {
			found = true;
			break;
		}
	}
	if (found) {
		panic("trying to acquire a target twice with the same psref: "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity-check if the target is already acquired with the same psref. */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference. */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible.  */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  No need to percpu_getref or get the head of the list,
	 * and the caller guarantees that we are bound to a CPU anyway
	 * (as does blocking interrupts).
	 */
	s = splraiseipl(class->prc_iplcookie);
	LIST_REMOVE(psref, psref_entry);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em. */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible.  */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference. */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}
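
#ifdef PSREF_EXAMPLE	/* illustrative sketch only; this macro is never defined */
/*
 * Example, not part of this file: using psref_copy to hand a second
 * reference to a callee that releases it itself.  Both references
 * stay on the CPU and LWP that acquired the original, as required.
 * frobber_frob_and_release is hypothetical and is assumed to call
 * psref_release on the reference passed to it before returning.
 */
static void
frobber_frob_twice(struct frobber *f, struct psref *psref)
{
	struct psref psref2;

	psref_copy(&psref2, psref, frobber_psref_class);
	frobber_frob_and_release(f, &psref2);

	/* The caller still holds, and later releases, *psref. */
}
#endif	/* PSREF_EXAMPLE */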

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	/* Ask all CPUs to say whether they hold a psref to the target. */
	xc_wait(xc_broadcast(0, &psreffed_p_xc, &P, NULL));

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done. */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU. */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert. */
	target->prt_class = NULL;
}
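
#ifdef PSREF_EXAMPLE	/* illustrative sketch only; this macro is never defined */
/*
 * Example, not part of this file: the usual teardown sequence for a
 * target.  The object is first unpublished so no new lookup can find
 * it, pserialize_perform waits for readers still inside their read
 * sections, psref_target_destroy waits for the passive references
 * they may have acquired to drain, and only then is the object freed.
 * frobber_unpublish and frobber_psz (a pserialize_t) are hypothetical.
 */
static void
frobber_detach(struct frobber *f)
{

	/* No new lookup can find f after this. */
	frobber_unpublish(f);

	/* Wait for current pserialize(9) readers to finish. */
	pserialize_perform(frobber_psz);

	/* Wait for existing passive references to drain. */
	psref_target_destroy(&f->f_target, frobber_psref_class);

	kmem_free(f, sizeof(*f));
}
#endif	/* PSREF_EXAMPLE */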

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU. */
	LIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU. */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on. */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it. */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
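
#ifdef PSREF_EXAMPLE	/* illustrative sketch only; this macro is never defined */
/*
 * Example, not part of this file: psref_held is meant for assertions,
 * e.g. to document that a sleepable operation on a frobber may be
 * called only while the caller already holds a passive reference on
 * the current CPU.  frobber_frob is hypothetical.
 */
static void
frobber_frob(struct frobber *f)
{

	KASSERT(psref_held(&f->f_target, frobber_psref_class));

	/* ... do work that may briefly sleep, e.g. on an adaptive lock ... */
}
#endif	/* PSREF_EXAMPLE */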