/*	$NetBSD: subr_psref.c,v 1.13 2019/05/17 03:34:26 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
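
/*
 * Example usage (an illustrative sketch only, not part of this file;
 * "frob", frob_lookup, frob_psref_class, and frob_target are all
 * hypothetical names).  A typical read-side pattern looks up the
 * object inside a pserialize(9) read section, takes a passive
 * reference, and leaves the read section before doing real work:
 *
 *	struct frob *frob;
 *	struct psref ref;
 *	int bound, s;
 *
 *	bound = curlwp_bind();		// bind LWP so sleeping is allowed
 *	s = pserialize_read_enter();	// target can't be destroyed here
 *	frob = frob_lookup(key);
 *	if (frob != NULL)
 *		psref_acquire(&ref, &frob->frob_target, frob_psref_class);
 *	pserialize_read_exit(s);
 *	if (frob != NULL) {
 *		// ...use frob; brief sleeps are OK while bound...
 *		psref_release(&ref, &frob->frob_target, frob_psref_class);
 *	}
 *	curlwp_bindx(bound);
 */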

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.13 2019/05/17 03:34:26 ozaki-r Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

SLIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
	unsigned int		prc_xc_flags;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * Data structures and functions for debugging.
 */
#ifndef PSREF_DEBUG_NITEMS
#define PSREF_DEBUG_NITEMS 16
#endif

struct psref_debug_item {
	void			*prdi_caller;
	struct psref		*prdi_psref;
};

struct psref_debug {
	int			prd_refs_peek;
	struct psref_debug_item prd_items[PSREF_DEBUG_NITEMS];
};

#ifdef PSREF_DEBUG
static void psref_debug_acquire(struct psref *);
static void psref_debug_release(struct psref *);

static void psref_debug_lwp_free(void *);

static specificdata_key_t psref_debug_lwp_key;
#endif

/*
 * psref_init()
 *
 *	Initialize the psref subsystem; called once during kernel
 *	startup.
 */
void
psref_init(void)
{

#ifdef PSREF_DEBUG
	lwp_specific_key_create(&psref_debug_lwp_key, psref_debug_lwp_free);
#endif
}

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);
	class->prc_xc_flags = XC_HIGHPRI_IPL(ipl);

	return class;
}
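
/*
 * Example (an illustrative sketch only; the names are hypothetical):
 * a subsystem whose targets are acquired from softnet interrupts
 * would create its class once at attach time:
 *
 *	static struct psref_class *frob_psref_class;
 *
 *	frob_psref_class = psref_class_create("frobref", IPL_SOFTNET);
 */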

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!SLIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
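
/*
 * Example (an illustrative sketch only; "frob" and its list are
 * hypothetical, and a reader-visible list would have to be safe for
 * pserialize(9) readers).  Initialize, order the stores with
 * membar_producer as required above, then publish:
 *
 *	frob = kmem_zalloc(sizeof(*frob), KM_SLEEP);
 *	psref_target_init(&frob->frob_target, frob_psref_class);
 *	membar_producer();
 *	mutex_enter(&frob_list_lock);
 *	LIST_INSERT_HEAD(&frob_list, frob, frob_entry);
 *	mutex_exit(&frob_list_lock);
 */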

#ifdef DEBUG
static bool
psref_exist(struct psref_cpu *pcpu, struct psref *psref)
{
	struct psref *_psref;

	SLIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref)
			return true;
	}
	return false;
}

static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = psref_exist(pcpu, psref);

	if (found) {
		panic("The psref is already in the list (acquiring twice?): "
		    "psref=%p target=%p", psref, target);
	}
}

static void
psref_check_existence(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = psref_exist(pcpu, psref);

	if (!found) {
		panic("The psref isn't in the list (releasing unused psref?): "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity check: this psref must not already be acquired.  */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference.  */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	curlwp->l_psrefs++;
#endif
#ifdef PSREF_DEBUG
	psref_debug_acquire(psref);
#endif
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible.  */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  The caller guarantees that we are bound to a CPU (as
	 * does blocking interrupts), so the list cannot change under
	 * us.
	 */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);
#ifdef DEBUG
	/* Sanity check: this psref must have been acquired earlier.  */
	psref_check_existence(pcpu, psref, target);
#endif
	SLIST_REMOVE(&pcpu->pcpu_head, psref, psref, psref_entry);
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	KASSERT(curlwp->l_psrefs > 0);
	curlwp->l_psrefs--;
#endif
#ifdef PSREF_DEBUG
	psref_debug_release(psref);
#endif

	/* If someone is waiting for users to drain, notify 'em.  */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible.  */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference.  */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	curlwp->l_psrefs++;
#endif
}
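
/*
 * Example (an illustrative sketch only; the names are hypothetical):
 * duplicating a held reference so a callee running on the same CPU
 * and LWP can release its copy independently:
 *
 *	struct psref ref2;
 *
 *	psref_copy(&ref2, &ref, frob_psref_class);
 *	frob_consume(frob, &ref2);	// callee releases ref2 before returning
 *	psref_release(&ref, &frob->frob_target, frob_psref_class);
 */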

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	if (__predict_true(mp_online)) {
		/*
		 * Ask all CPUs to say whether they hold a psref to the
		 * target.
		 */
		xc_wait(xc_broadcast(class->prc_xc_flags, &psreffed_p_xc, &P,
		                     NULL));
	} else
		psreffed_p_xc(&P, NULL);

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done.  */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU.  */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert.  */
	target->prt_class = NULL;
}
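
/*
 * Example (an illustrative sketch only; the names are hypothetical):
 * writer-side teardown of the "frob" object published above.
 * Unpublish it first so no new references can be taken, wait for
 * pserialize(9) readers still in the lookup, then drain and free:
 *
 *	mutex_enter(&frob_list_lock);
 *	LIST_REMOVE(frob, frob_entry);
 *	pserialize_perform(frob_psz);
 *	mutex_exit(&frob_list_lock);
 *
 *	psref_target_destroy(&frob->frob_target, frob_psref_class);
 *	kmem_free(frob, sizeof(*frob));
 */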

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU.  */
	SLIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU.  */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on.  */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it.  */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
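
/*
 * Example (an illustrative sketch only; the names are hypothetical):
 * a routine that requires its caller to hold a reference can assert
 * so, since psref_held may be used only inside assertions:
 *
 *	KASSERT(psref_held(&frob->frob_target, frob_psref_class));
 */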

#ifdef PSREF_DEBUG
void
psref_debug_init_lwp(struct lwp *l)
{
	struct psref_debug *prd;

	prd = kmem_zalloc(sizeof(*prd), KM_SLEEP);
	lwp_setspecific_by_lwp(l, psref_debug_lwp_key, prd);
}

static void
psref_debug_lwp_free(void *arg)
{
	struct psref_debug *prd = arg;

	kmem_free(prd, sizeof(*prd));
}

static void
psref_debug_acquire(struct psref *psref)
{
	struct psref_debug *prd;
	struct lwp *l = curlwp;
	int s, i;

	prd = lwp_getspecific(psref_debug_lwp_key);
	if (__predict_false(prd == NULL)) {
		psref->psref_debug = NULL;
		return;
	}

	s = splserial();
	if (l->l_psrefs > prd->prd_refs_peek) {
		prd->prd_refs_peek = l->l_psrefs;
		if (__predict_false(prd->prd_refs_peek > PSREF_DEBUG_NITEMS))
			panic("exceeded PSREF_DEBUG_NITEMS");
	}
	for (i = 0; i < prd->prd_refs_peek; i++) {
		struct psref_debug_item *prdi = &prd->prd_items[i];
		if (prdi->prdi_psref != NULL)
			continue;
		prdi->prdi_caller = psref->psref_debug;
		prdi->prdi_psref = psref;
		psref->psref_debug = prdi;
		break;
	}
	if (__predict_false(i == prd->prd_refs_peek))
		panic("out of range: %d", i);
	splx(s);
}

static void
psref_debug_release(struct psref *psref)
{
	int s;

	s = splserial();
	if (__predict_true(psref->psref_debug != NULL)) {
		struct psref_debug_item *prdi = psref->psref_debug;
		prdi->prdi_psref = NULL;
	}
	splx(s);
}

void
psref_debug_barrier(void)
{
	struct psref_debug *prd;
	struct lwp *l = curlwp;
	int s, i;

	prd = lwp_getspecific(psref_debug_lwp_key);
	if (__predict_false(prd == NULL))
		return;

	s = splserial();
	for (i = 0; i < prd->prd_refs_peek; i++) {
		struct psref_debug_item *prdi = &prd->prd_items[i];
		if (__predict_true(prdi->prdi_psref == NULL))
			continue;
		panic("psref leaked: lwp(%p) acquired at %p", l, prdi->prdi_caller);
	}
	prd->prd_refs_peek = 0; /* Reset the counter */
	splx(s);
}
#endif /* PSREF_DEBUG */