xref: /netbsd-src/sys/kern/subr_lockdebug.c (revision ce099b40997c43048fb78bd578195f81d2456523)
1 /*	$NetBSD: subr_lockdebug.c,v 1.31 2008/04/28 20:24:04 martin Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Basic lock debugging code shared among lock primitives.
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.31 2008/04/28 20:24:04 martin Exp $");
38 
39 #include "opt_ddb.h"
40 
41 #include <sys/param.h>
42 #include <sys/proc.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/kmem.h>
46 #include <sys/lockdebug.h>
47 #include <sys/sleepq.h>
48 #include <sys/cpu.h>
49 #include <sys/atomic.h>
50 #include <sys/lock.h>
51 
52 #include <lib/libkern/rb.h>
53 
54 #include <machine/lock.h>
55 
56 unsigned int		ld_panic;
57 
58 #ifdef LOCKDEBUG
59 
60 #define	LD_BATCH_SHIFT	9
61 #define	LD_BATCH	(1 << LD_BATCH_SHIFT)	/* structures allocated per batch */
62 #define	LD_BATCH_MASK	(LD_BATCH - 1)
63 #define	LD_MAX_LOCKS	1048576			/* maximum structures ever allocated */
64 #define	LD_SLOP		16			/* minimum free structures to keep */
65 
66 #define	LD_LOCKED	0x01			/* ld_flags: held exclusively */
67 #define	LD_SLEEPER	0x02			/* ld_flags: tracks a sleep lock */
68 
69 #define	LD_WRITE_LOCK	0x80000000		/* lku_lock: write (exclusive) hold */
70 
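/*
 * Internal lock protecting lockdebug state.  Each instance is padded to
 * COHERENCY_UNIT so that the locks do not share cache lines.
 */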
71 typedef union lockdebuglk {
72 	struct {
73 		u_int	lku_lock;
74 		int	lku_oldspl;
75 	} ul;
76 	uint8_t	lk_pad[COHERENCY_UNIT];
77 } volatile __aligned(COHERENCY_UNIT) lockdebuglk_t;
78 
79 #define	lk_lock		ul.lku_lock
80 #define	lk_oldspl	ul.lku_oldspl
81 
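/*
 * Per-lock debugging record.  Records are kept in a red-black tree keyed
 * on the lock address; ld_achain links every record onto ld_all, while
 * ld_chain links a record onto the free list or, while the lock is held
 * exclusively, onto the sleeper/spinner list.
 */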
82 typedef struct lockdebug {
83 	struct rb_node	ld_rb_node;	/* must be the first member */
84 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
85 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
86 	volatile void	*ld_lock;
87 	lockops_t	*ld_lockops;
88 	struct lwp	*ld_lwp;
89 	uintptr_t	ld_locked;
90 	uintptr_t	ld_unlocked;
91 	uintptr_t	ld_initaddr;
92 	uint16_t	ld_shares;
93 	uint16_t	ld_cpu;
94 	uint8_t		ld_flags;
95 	uint8_t		ld_shwant;	/* advisory */
96 	uint8_t		ld_exwant;	/* advisory */
97 	uint8_t		ld_unused;
98 } volatile lockdebug_t;
99 
100 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
101 
102 lockdebuglk_t		ld_tree_lk;	/* protects ld_rb_tree */
103 lockdebuglk_t		ld_sleeper_lk;	/* protects ld_sleepers and sleep lock records */
104 lockdebuglk_t		ld_spinner_lk;	/* protects ld_spinners and spin lock records */
105 lockdebuglk_t		ld_free_lk;	/* protects ld_free, ld_nfree, ld_freeptr */
106 
107 lockdebuglist_t		ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
108 lockdebuglist_t		ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
109 lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
110 lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
111 int			ld_nfree;
112 int			ld_freeptr;
113 int			ld_recurse;
114 bool			ld_nomore;
115 lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH]; /* allocated batches */
116 
117 lockdebug_t		ld_prime[LD_BATCH];	/* static first batch, used before VM is up */
118 
119 static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
120 				 const char *, const char *, bool);
121 static void	lockdebug_more(void);
122 static void	lockdebug_init(void);
123 
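/*
 * Red-black tree comparison routines: order lockdebug records by the
 * address of the lock that each record tracks.
 */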
124 static signed int
125 ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
126 {
127 	const lockdebug_t *ld1 = (const void *)n1;
128 	const lockdebug_t *ld2 = (const void *)n2;
129 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
130 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
131 
132 	if (a < b)
133 		return 1;
134 	if (a > b)
135 		return -1;
136 	return 0;
137 }
138 
139 static signed int
140 ld_rb_compare_key(const struct rb_node *n, const void *key)
141 {
142 	const lockdebug_t *ld = (const void *)n;
143 	const uintptr_t a = (uintptr_t)ld->ld_lock;
144 	const uintptr_t b = (uintptr_t)key;
145 
146 	if (a < b)
147 		return 1;
148 	if (a > b)
149 		return -1;
150 	return 0;
151 }
152 
153 static struct rb_tree ld_rb_tree;
154 
155 static const struct rb_tree_ops ld_rb_tree_ops = {
156 	.rb_compare_nodes = ld_rb_compare_nodes,
157 	.rb_compare_key = ld_rb_compare_key,
158 };
159 
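/*
 * lockdebug_lock_init:
 *
 *	Initialize one of the internal lockdebug locks.
 */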
160 static void
161 lockdebug_lock_init(lockdebuglk_t *lk)
162 {
163 
164 	lk->lk_lock = 0;
165 }
166 
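/*
 * lockdebug_lock:
 *
 *	Acquire an internal lock exclusively: raise to splhigh(), spin until
 *	the lock word is clear, then compare-and-swap in LD_WRITE_LOCK.  The
 *	previous SPL is saved in the lock for lockdebug_unlock() to restore.
 */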
167 static void
168 lockdebug_lock(lockdebuglk_t *lk)
169 {
170 	int s;
171 
172 	s = splhigh();
173 	do {
174 		while (lk->lk_lock != 0) {
175 			SPINLOCK_SPIN_HOOK;
176 		}
177 	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
178 	lk->lk_oldspl = s;
179 	membar_enter();
180 }
181 
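/*
 * lockdebug_unlock:
 *
 *	Release an exclusively held internal lock and restore the saved SPL.
 */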
182 static void
183 lockdebug_unlock(lockdebuglk_t *lk)
184 {
185 	int s;
186 
187 	s = lk->lk_oldspl;
188 	membar_exit();
189 	lk->lk_lock = 0;
190 	splx(s);
191 }
192 
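/*
 * lockdebug_lock_rd:
 *
 *	Acquire an internal lock shared: spin while a writer holds it, then
 *	atomically increment the reader count.  Returns the previous SPL,
 *	which the caller passes back to lockdebug_unlock_rd().
 */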
193 static int
194 lockdebug_lock_rd(lockdebuglk_t *lk)
195 {
196 	u_int val;
197 	int s;
198 
199 	s = splhigh();
200 	do {
201 		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
202 			SPINLOCK_SPIN_HOOK;
203 		}
204 	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
205 	membar_enter();
206 	return s;
207 }
208 
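/*
 * lockdebug_unlock_rd:
 *
 *	Drop a shared hold on an internal lock and restore the given SPL.
 */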
209 static void
210 lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
211 {
212 
213 	membar_exit();
214 	atomic_dec_uint(&lk->lk_lock);
215 	splx(s);
216 }
217 
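/*
 * lockdebug_lookup1:
 *
 *	Look up the lockdebug record for a lock.  If found, return it with
 *	the matching list lock (sleeper or spinner) held and *lk pointing at
 *	that lock; the caller must release *lk.  Returns NULL if the lock
 *	has no record.
 */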
218 static inline lockdebug_t *
219 lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
220 {
221 	lockdebug_t *ld;
222 	int s;
223 
224 	s = lockdebug_lock_rd(&ld_tree_lk);
225 	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
226 	lockdebug_unlock_rd(&ld_tree_lk, s);
227 	if (ld == NULL)
228 		return NULL;
229 
230 	if ((ld->ld_flags & LD_SLEEPER) != 0)
231 		*lk = &ld_sleeper_lk;
232 	else
233 		*lk = &ld_spinner_lk;
234 
235 	lockdebug_lock(*lk);
236 	return ld;
237 }
238 
239 /*
240  * lockdebug_lookup:
241  *
242  *	Find a lockdebug structure by a pointer to a lock and return it locked.
243  */
244 static inline lockdebug_t *
245 lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
246 {
247 	lockdebug_t *ld;
248 
249 	ld = lockdebug_lookup1(lock, lk);
250 	if (ld == NULL)
251 		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
252 	return ld;
253 }
254 
255 /*
256  * lockdebug_init:
257  *
258  *	Initialize the lockdebug system.  Allocate an initial pool of
259  *	lockdebug structures before the VM system is up and running.
260  */
261 static void
262 lockdebug_init(void)
263 {
264 	lockdebug_t *ld;
265 	int i;
266 
267 	lockdebug_lock_init(&ld_tree_lk);
268 	lockdebug_lock_init(&ld_sleeper_lk);
269 	lockdebug_lock_init(&ld_spinner_lk);
270 	lockdebug_lock_init(&ld_free_lk);
271 
272 	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
273 
274 	ld = ld_prime;
275 	ld_table[0] = ld;
276 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
277 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
278 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
279 	}
280 	ld_freeptr = 1;
281 	ld_nfree = LD_BATCH - 1;
282 }
283 
284 /*
285  * lockdebug_alloc:
286  *
287  *	A lock is being initialized, so allocate an associated debug
288  *	structure.
289  */
290 bool
291 lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
292 {
293 	struct cpu_info *ci;
294 	lockdebug_t *ld;
295 	lockdebuglk_t *lk;
296 
297 	if (lo == NULL || panicstr != NULL || ld_panic)
298 		return false;
299 	if (ld_freeptr == 0)
300 		lockdebug_init();
301 
302 	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
303 		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
304 		return false;
305 	}
306 
307 	/*
308 	 * Pinch a new debug structure.  We may recurse because we call
309 	 * kmem_alloc(), which may need to initialize new locks somewhere
310 	 * down the path.  If not recursing, we try to maintain at least
311 	 * LD_SLOP structures free, which should hopefully be enough to
312 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
313 	 * worry: we'll just mark the lock as not having an ID.
314 	 */
315 	lockdebug_lock(&ld_free_lk);
316 	ci = curcpu();
317 	ci->ci_lkdebug_recurse++;
318 
319 	if (TAILQ_EMPTY(&ld_free)) {
320 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
321 			ci->ci_lkdebug_recurse--;
322 			lockdebug_unlock(&ld_free_lk);
323 			return false;
324 		}
325 		lockdebug_more();
326 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
327 		lockdebug_more();
328 
329 	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
330 		lockdebug_unlock(&ld_free_lk);
331 		return false;
332 	}
333 
334 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
335 	ld_nfree--;
336 
337 	ci->ci_lkdebug_recurse--;
338 	lockdebug_unlock(&ld_free_lk);
339 
340 	if (ld->ld_lock != NULL)
341 		panic("lockdebug_alloc: corrupt table");
342 
343 	if (lo->lo_sleeplock)
344 		lockdebug_lock(&ld_sleeper_lk);
345 	else
346 		lockdebug_lock(&ld_spinner_lk);
347 
348 	/* Initialise the structure. */
349 	ld->ld_lock = lock;
350 	ld->ld_lockops = lo;
351 	ld->ld_locked = 0;
352 	ld->ld_unlocked = 0;
353 	ld->ld_lwp = NULL;
354 	ld->ld_initaddr = initaddr;
355 
356 	lockdebug_lock(&ld_tree_lk);
357 	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
358 	lockdebug_unlock(&ld_tree_lk);
359 
360 	if (lo->lo_sleeplock) {
361 		ld->ld_flags = LD_SLEEPER;
362 		lockdebug_unlock(&ld_sleeper_lk);
363 	} else {
364 		ld->ld_flags = 0;
365 		lockdebug_unlock(&ld_spinner_lk);
366 	}
367 
368 	return true;
369 }
370 
371 /*
372  * lockdebug_free:
373  *
374  *	A lock is being destroyed, so release debugging resources.
375  */
376 void
377 lockdebug_free(volatile void *lock)
378 {
379 	lockdebug_t *ld;
380 	lockdebuglk_t *lk;
381 
382 	if (panicstr != NULL || ld_panic)
383 		return;
384 
385 	ld = lockdebug_lookup(lock, &lk);
386 	if (ld == NULL) {
387 		/* Unreachable: lockdebug_lookup() panics on unknown locks. */
388 		panic("lockdebug_free: destroying uninitialized lock %p",
389 		    lock);
390 		return;
391 	}
393 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
394 		lockdebug_abort1(ld, lk, __func__, "is locked", true);
395 		return;
396 	}
397 	lockdebug_lock(&ld_tree_lk);
398 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
399 	lockdebug_unlock(&ld_tree_lk);
400 	ld->ld_lock = NULL;
401 	lockdebug_unlock(lk);
402 
403 	lockdebug_lock(&ld_free_lk);
404 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
405 	ld_nfree++;
406 	lockdebug_unlock(&ld_free_lk);
407 }
408 
409 /*
410  * lockdebug_more:
411  *
412  *	Allocate a batch of debug structures and add to the free list.
413  *	Must be called with ld_free_lk held.
414  */
415 static void
416 lockdebug_more(void)
417 {
418 	lockdebug_t *ld;
419 	void *block;
420 	int i, base, m;
421 
422 	while (ld_nfree < LD_SLOP) {
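		/*
		 * Drop ld_free_lk across kmem_zalloc(): the allocation may
		 * sleep and may itself need to initialize new locks, which
		 * re-enters the lockdebug code.
		 */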
423 		lockdebug_unlock(&ld_free_lk);
424 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
425 		lockdebug_lock(&ld_free_lk);
426 
427 		if (block == NULL)
428 			return;
429 
430 		if (ld_nfree > LD_SLOP) {
431 			/* Somebody beat us to it. */
432 			lockdebug_unlock(&ld_free_lk);
433 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
434 			lockdebug_lock(&ld_free_lk);
435 			continue;
436 		}
437 
438 		base = ld_freeptr;
439 		ld_nfree += LD_BATCH;
440 		ld = block;
441 		base <<= LD_BATCH_SHIFT;
442 		m = min(LD_MAX_LOCKS, base + LD_BATCH);
443 
444 		if (m == LD_MAX_LOCKS)
445 			ld_nomore = true;
446 
447 		for (i = base; i < m; i++, ld++) {
448 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
449 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
450 		}
451 
452 		membar_producer();
453 		ld_table[ld_freeptr++] = block;
454 	}
455 }
456 
457 /*
458  * lockdebug_wantlock:
459  *
460  *	Process the preamble to a lock acquire.
461  */
462 void
463 lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
464 {
465 	struct lwp *l = curlwp;
466 	lockdebuglk_t *lk;
467 	lockdebug_t *ld;
468 	bool recurse;
469 
470 	(void)shared;
471 	recurse = false;
472 
473 	if (panicstr != NULL || ld_panic)
474 		return;
475 
476 	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
477 		return;
478 
479 	if ((ld->ld_flags & LD_LOCKED) != 0) {
480 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
481 			if (ld->ld_lwp == l)
482 				recurse = true;
483 		} else if (ld->ld_cpu == (uint16_t)cpu_number())
484 			recurse = true;
485 	}
486 
487 	if (cpu_intr_p()) {
488 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
489 			lockdebug_abort1(ld, lk, __func__,
490 			    "acquiring sleep lock from interrupt context",
491 			    true);
492 			return;
493 		}
494 	}
495 
496 	if (shared)
497 		ld->ld_shwant++;
498 	else
499 		ld->ld_exwant++;
500 
501 	if (recurse) {
502 		lockdebug_abort1(ld, lk, __func__, "locking against myself",
503 		    true);
504 		return;
505 	}
506 
507 	lockdebug_unlock(lk);
508 }
509 
510 /*
511  * lockdebug_locked:
512  *
513  *	Process a lock acquire operation.
514  */
515 void
516 lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
517 {
518 	struct lwp *l = curlwp;
519 	lockdebuglk_t *lk;
520 	lockdebug_t *ld;
521 
522 	if (panicstr != NULL || ld_panic)
523 		return;
524 
525 	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
526 		return;
527 
528 	if (shared) {
529 		l->l_shlocks++;
530 		ld->ld_shares++;
531 		ld->ld_shwant--;
532 	} else {
533 		if ((ld->ld_flags & LD_LOCKED) != 0) {
534 			lockdebug_abort1(ld, lk, __func__,
535 			    "already locked", true);
536 			return;
537 		}
538 
539 		ld->ld_flags |= LD_LOCKED;
540 		ld->ld_locked = where;
541 		ld->ld_cpu = (uint16_t)cpu_number();
542 		ld->ld_lwp = l;
543 		ld->ld_exwant--;
544 
545 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
546 			l->l_exlocks++;
547 			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
548 		} else {
549 			curcpu()->ci_spin_locks2++;
550 			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
551 		}
552 	}
553 
554 	lockdebug_unlock(lk);
555 }
556 
557 /*
558  * lockdebug_unlocked:
559  *
560  *	Process a lock release operation.
561  */
562 void
563 lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
564 {
565 	struct lwp *l = curlwp;
566 	lockdebuglk_t *lk;
567 	lockdebug_t *ld;
568 
569 	if (panicstr != NULL || ld_panic)
570 		return;
571 
572 	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
573 		return;
574 
575 	if (shared) {
576 		if (l->l_shlocks == 0) {
577 			lockdebug_abort1(ld, lk, __func__,
578 			    "no shared locks held by LWP", true);
579 			return;
580 		}
581 		if (ld->ld_shares == 0) {
582 			lockdebug_abort1(ld, lk, __func__,
583 			    "no shared holds on this lock", true);
584 			return;
585 		}
586 		l->l_shlocks--;
587 		ld->ld_shares--;
588 	} else {
589 		if ((ld->ld_flags & LD_LOCKED) == 0) {
590 			lockdebug_abort1(ld, lk, __func__, "not locked",
591 			    true);
592 			return;
593 		}
594 
595 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
596 			if (ld->ld_lwp != curlwp) {
597 				lockdebug_abort1(ld, lk, __func__,
598 				    "not held by current LWP", true);
599 				return;
600 			}
601 			ld->ld_flags &= ~LD_LOCKED;
602 			ld->ld_unlocked = where;
603 			ld->ld_lwp = NULL;
604 			curlwp->l_exlocks--;
605 			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
606 		} else {
607 			if (ld->ld_cpu != (uint16_t)cpu_number()) {
608 				lockdebug_abort1(ld, lk, __func__,
609 				    "not held by current CPU", true);
610 				return;
611 			}
612 			ld->ld_flags &= ~LD_LOCKED;
613 			ld->ld_unlocked = where;
614 			ld->ld_lwp = NULL;
615 			curcpu()->ci_spin_locks2--;
616 			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
617 		}
618 	}
619 
620 	lockdebug_unlock(lk);
621 }
622 
623 /*
624  * lockdebug_barrier:
625  *
626  *	Panic if we hold any spin lock other than the one specified and,
627  *	unless slplocks is set, if we hold any sleep or shared locks.
628  */
629 void
630 lockdebug_barrier(volatile void *spinlock, int slplocks)
631 {
632 	struct lwp *l = curlwp;
633 	lockdebug_t *ld;
634 	uint16_t cpuno;
635 	int s, s0;
636 
637 	if (panicstr != NULL || ld_panic)
638 		return;
639 
640 	/*
641 	 * Use splsoftclock() and not a critical section to block preemption.
642 	 * kpreempt_disable() will skew preemption statistics by firing again
643 	 * in mi_switch(), while we are preempting!
644 	 */
645 	s0 = splsoftclock();
646 
647 	if (curcpu()->ci_spin_locks2 != 0) {
648 		cpuno = (uint16_t)cpu_number();
649 
650 		s = lockdebug_lock_rd(&ld_spinner_lk);
651 		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
652 			if (ld->ld_lock == spinlock) {
653 				if (ld->ld_cpu != cpuno) {
654 					lockdebug_abort1(ld, &ld_spinner_lk,
655 					    __func__,
656 					    "not held by current CPU", true);
657 					splx(s0);
658 					return;
659 				}
660 				continue;
661 			}
662 			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0) {
663 				lockdebug_abort1(ld, &ld_spinner_lk,
664 				    __func__, "spin lock held", true);
665 				splx(s0);
666 				return;
667 			}
668 		}
669 		lockdebug_unlock_rd(&ld_spinner_lk, s);
670 	}
671 
672 	if (!slplocks) {
673 		if (l->l_exlocks != 0) {
674 			s = lockdebug_lock_rd(&ld_sleeper_lk);
675 			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
676 				if (ld->ld_lwp == l) {
677 					lockdebug_abort1(ld, &ld_sleeper_lk,
678 					    __func__, "sleep lock held", true);
679 					splx(s0);
680 					return;
681 				}
682 			}
683 			lockdebug_unlock_rd(&ld_sleeper_lk, s);
684 		}
685 		if (l->l_shlocks != 0)
686 			panic("lockdebug_barrier: holding %d shared locks",
687 			    l->l_shlocks);
688 	}
689 
690 	splx(s0);
691 }
692 
693 /*
694  * lockdebug_mem_check:
695  *
696  *	Check for in-use locks within a memory region that is
697  *	being freed.
698  */
699 void
700 lockdebug_mem_check(const char *func, void *base, size_t sz)
701 {
702 	lockdebug_t *ld;
703 	lockdebuglk_t *lk;
704 	int s;
705 
706 	if (panicstr != NULL || ld_panic)
707 		return;
708 
709 	s = lockdebug_lock_rd(&ld_tree_lk);
710 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
711 	if (ld != NULL) {
712 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
713 
714 		if ((uintptr_t)base > lock)
715 			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
716 			    __func__, ld, base, sz);
717 		if (lock >= (uintptr_t)base + sz)
718 			ld = NULL;
719 	}
720 	lockdebug_unlock_rd(&ld_tree_lk, s);
721 	if (ld == NULL)
722 		return;
723 
724 	if ((ld->ld_flags & LD_SLEEPER) != 0)
725 		lk = &ld_sleeper_lk;
726 	else
727 		lk = &ld_spinner_lk;
728 
729 	lockdebug_lock(lk);
730 	lockdebug_abort1(ld, lk, func,
731 	    "allocation contains active lock", !cold);
732 }
733 
734 /*
735  * lockdebug_dump:
736  *
737  *	Dump information about a lock on panic, or for DDB.
738  */
739 static void
740 lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
741 {
742 	int sleeper = (ld->ld_flags & LD_SLEEPER);
743 
744 	(*pr)(
745 	    "lock address : %#018lx type     : %18s\n"
746 	    "shared holds : %18u exclusive: %18u\n"
747 	    "shares wanted: %18u exclusive: %18u\n"
748 	    "current cpu  : %18u last held: %18u\n"
749 	    "current lwp  : %#018lx last held: %#018lx\n"
750 	    "last locked  : %#018lx unlocked : %#018lx\n"
751 	    "initialized  : %#018lx\n",
752 	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
753 	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
754 	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
755 	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
756 	    (long)curlwp, (long)ld->ld_lwp,
757 	    (long)ld->ld_locked, (long)ld->ld_unlocked,
758 	    (long)ld->ld_initaddr);
759 
760 	if (ld->ld_lockops->lo_dump != NULL)
761 		(*ld->ld_lockops->lo_dump)(ld->ld_lock);
762 
763 	if (sleeper) {
764 		(*pr)("\n");
765 		turnstile_print(ld->ld_lock, pr);
766 	}
767 }
768 
769 /*
770  * lockdebug_abort1:
771  *
772  *	An error has been trapped - dump lock info and panic.
773  */
774 static void
775 lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
776 		 const char *msg, bool dopanic)
777 {
778 
779 	/*
780 	 * Don't make the situation worse if the system is already going
781 	 * down in flames.  Once a panic is triggered, lockdebug state
782 	 * becomes stale and cannot be trusted.
783 	 */
784 	if (atomic_inc_uint_nv(&ld_panic) != 1) {
785 		lockdebug_unlock(lk);
786 		return;
787 	}
788 
789 	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
790 	    func, msg);
791 	lockdebug_dump(ld, printf_nolog);
792 	lockdebug_unlock(lk);
793 	printf_nolog("\n");
794 	if (dopanic)
795 		panic("LOCKDEBUG");
796 }
797 
798 #endif	/* LOCKDEBUG */
799 
800 /*
801  * lockdebug_lock_print:
802  *
803  *	Handle the DDB 'show lock' command.
804  */
805 #ifdef DDB
806 void
807 lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
808 {
809 #ifdef LOCKDEBUG
810 	lockdebug_t *ld;
811 
812 	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
813 		if (ld->ld_lock == addr) {
814 			lockdebug_dump(ld, pr);
815 			return;
816 		}
817 	}
818 	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
819 #else
820 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
821 #endif	/* LOCKDEBUG */
822 }
823 #endif	/* DDB */
824 
825 /*
826  * lockdebug_abort:
827  *
828  *	An error has been trapped - dump lock info and call panic().
829  */
830 void
831 lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
832 		const char *msg)
833 {
834 #ifdef LOCKDEBUG
835 	lockdebug_t *ld;
836 	lockdebuglk_t *lk;
837 
838 	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
839 		lockdebug_abort1(ld, lk, func, msg, true);
840 		/* NOTREACHED */
841 	}
842 #endif	/* LOCKDEBUG */
843 
844 	/*
845 	 * Complain only on the first occurrence.  Otherwise proceed to
846 	 * panic, where we will `rendezvous' with other CPUs if the machine
847 	 * is going down in flames.
848 	 */
849 	if (atomic_inc_uint_nv(&ld_panic) == 1) {
850 		printf_nolog("%s error: %s: %s\n\n"
851 		    "lock address : %#018lx\n"
852 		    "current cpu  : %18d\n"
853 		    "current lwp  : %#018lx\n",
854 		    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
855 		    (long)curlwp);
856 		(*ops->lo_dump)(lock);
857 		printf_nolog("\n");
858 	}
859 
860 	panic("lock error");
861 }
862