/*	$NetBSD: subr_psref.c,v 1.6 2016/11/09 09:00:46 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	A passive reference is a reference to an object that guarantees
 *	the object will not be destroyed until the reference is
 *	released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space for a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
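
/*
 * Example life cycle (a sketch only -- frob_class, struct frob, and
 * the surrounding code are hypothetical names, not part of this
 * file):
 *
 *	struct psref_class *frob_class;
 *	struct frob {
 *		...
 *		struct psref_target	f_psref;
 *	} *frob;
 *
 *	frob_class = psref_class_create("frobref", IPL_SOFTNET);
 *	psref_target_init(&frob->f_psref, frob_class);
 *	... publish frob so that other CPUs can look it up ...
 *
 *	On a reader, bound to a CPU (e.g. in a softint):
 *
 *	struct psref psref;
 *	psref_acquire(&psref, &frob->f_psref, frob_class);
 *	... use frob; brief sleeps are allowed ...
 *	psref_release(&psref, &frob->f_psref, frob_class);
 *
 *	To tear down, once no new lookups can find frob:
 *
 *	psref_target_destroy(&frob->f_psref, frob_class);	(may sleep)
 *	psref_class_destroy(frob_class);
 */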

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.6 2016/11/09 09:00:46 ozaki-r Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

LIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	/* KM_SLEEP allocations cannot fail, so no error branches.  */
	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);

	return class;
}
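
/*
 * For instance, a subsystem might create one class at attach time (a
 * hypothetical sketch -- "frobref" and IPL_SOFTNET are illustrative;
 * the ipl must be high enough to block all acquirers and releasers in
 * the class):
 *
 *	frob_class = psref_class_create("frobref", IPL_SOFTNET);
 */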

#ifdef DIAGNOSTIC
/*
 * psref_cpu_drained_p(p, cookie, ci)
 *
 *	percpu_foreach callback: if this CPU's list of passive
 *	references is nonempty, clear the flag that cookie points to.
 */
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!LIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

/*
 * psref_class_drained_p(prc)
 *
 *	True if no CPU holds any passive references in the class.
 */
static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
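
/*
 * The required publication order looks like this (a sketch; frob and
 * frob_published are hypothetical, and an insertion into a
 * pserialize(9)-protected list would serve equally well as the
 * pointer store):
 *
 *	psref_target_init(&frob->f_psref, frob_class);
 *	membar_producer();
 *	frob_published = frob;		now visible to readers
 */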

#ifdef DEBUG
/*
 * psref_check_duplication(pcpu, psref, target)
 *
 *	Panic if the psref object is already on this CPU's list with
 *	the same target, i.e. if the caller is trying to acquire the
 *	target twice with the same psref.
 */
static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;
	struct psref *_psref;

	LIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref &&
		    _psref->psref_target == target) {
			found = true;
			break;
		}
	}
	if (found) {
		panic("trying to acquire a target twice with the same psref: "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/*
	 * Sanity-check that the target is not already acquired with
	 * the same psref.
	 */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference.  */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);
}
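
/*
 * A typical lookup-and-acquire runs under pserialize(9), so that the
 * target cannot be destroyed between the lookup and psref_acquire (a
 * sketch; frob_published and the use of curlwp_bind to bind the LWP
 * to its CPU are illustrative assumptions, not part of this file):
 *
 *	struct psref psref;
 *	struct frob *frob;
 *	int bound, s;
 *
 *	bound = curlwp_bind();
 *	s = pserialize_read_enter();
 *	frob = frob_published;
 *	if (frob != NULL)
 *		psref_acquire(&psref, &frob->f_psref, frob_class);
 *	pserialize_read_exit(s);
 *	if (frob != NULL) {
 *		... use frob ...
 *		psref_release(&psref, &frob->f_psref, frob_class);
 *	}
 *	curlwp_bindx(bound);
 */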

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible.  */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  No need to percpu_getref or get the head of the list,
	 * and the caller guarantees that we are bound to a CPU anyway
	 * (as does blocking interrupts).
	 */
	s = splraiseipl(class->prc_iplcookie);
	LIST_REMOVE(psref, psref_entry);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em.  */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible.  */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference.  */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);
}
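
/*
 * For example, to hand a second reference to a callee running on the
 * same LWP (a sketch; the frob names are hypothetical):
 *
 *	struct psref psref2;
 *
 *	psref_copy(&psref2, &psref, frob_class);
 *	frob_callee(frob, &psref2);	callee releases psref2
 *	psref_release(&psref, &frob->f_psref, frob_class);
 */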

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	/* Ask all CPUs to say whether they hold a psref to the target.  */
	xc_wait(xc_broadcast(0, &psreffed_p_xc, &P, NULL));

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done.  */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU.  */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert.  */
	target->prt_class = NULL;
}
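
/*
 * A typical teardown, paired with the pserialize(9) lookup sketched
 * above (frob_published and frob_psz, a pserialize handle from
 * pserialize_create, are hypothetical):
 *
 *	frob_published = NULL;		no new lookups can find frob
 *	pserialize_perform(frob_psz);	wait out pserialize readers
 *	psref_target_destroy(&frob->f_psref, frob_class);
 *	kmem_free(frob, sizeof(*frob));
 */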

/*
 * _psref_held(target, class, lwp_mismatch_ok)
 *
 *	Common guts of psref_held and psreffed_p: true if the current
 *	CPU holds a passive reference to target.  If lwp_mismatch_ok,
 *	do not insist that the reference be held by the current LWP,
 *	for use by psref_target_destroy's cross-call.
 */
static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU.  */
	LIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU.  */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on.  */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it.  */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
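
/*
 * Typically used to assert that a caller already holds a reference
 * (a sketch; the frob names are hypothetical):
 *
 *	KASSERT(psref_held(&frob->f_psref, frob_class));
 */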