xref: /netbsd-src/sys/kern/subr_lockdebug.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: subr_lockdebug.c,v 1.23 2007/12/08 15:00:13 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Basic lock debugging code shared among lock primitives.
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.23 2007/12/08 15:00:13 ad Exp $");
45 
46 #include "opt_ddb.h"
47 
48 #include <sys/param.h>
49 #include <sys/proc.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/kmem.h>
53 #include <sys/lock.h>
54 #include <sys/lockdebug.h>
55 #include <sys/sleepq.h>
56 #include <sys/cpu.h>
57 #include <sys/atomic.h>
58 
59 #include <lib/libkern/rb.h>
60 
61 #ifdef LOCKDEBUG
62 
63 #define	LD_BATCH_SHIFT	9
64 #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
65 #define	LD_BATCH_MASK	(LD_BATCH - 1)
66 #define	LD_MAX_LOCKS	1048576
67 #define	LD_SLOP		16
68 
69 #define	LD_LOCKED	0x01
70 #define	LD_SLEEPER	0x02
71 
72 #define	LD_WRITE_LOCK	0x80000000
73 
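/*
 * lockdebuglk_t is a simple spin lock guarding lockdebug state: lku_lock
 * holds a count of shared (read) holds, or LD_WRITE_LOCK while held
 * exclusively.  The union is padded and aligned to a cache line to avoid
 * false sharing between the locks defined below.
 */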
74 typedef union lockdebuglk {
75 	struct {
76 		u_int	lku_lock;
77 		int	lku_oldspl;
78 	} ul;
79 	uint8_t	lk_pad[CACHE_LINE_SIZE];
80 } volatile __aligned(CACHE_LINE_SIZE) lockdebuglk_t;
81 
82 #define	lk_lock		ul.lku_lock
83 #define	lk_oldspl	ul.lku_oldspl
84 
85 typedef struct lockdebug {
86 	struct rb_node	ld_rb_node;	/* must be the first member */
87 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
88 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
89 	volatile void	*ld_lock;
90 	lockops_t	*ld_lockops;
91 	struct lwp	*ld_lwp;
92 	uintptr_t	ld_locked;
93 	uintptr_t	ld_unlocked;
94 	uintptr_t	ld_initaddr;
95 	uint16_t	ld_shares;
96 	uint16_t	ld_cpu;
97 	uint8_t		ld_flags;
98 	uint8_t		ld_shwant;	/* advisory */
99 	uint8_t		ld_exwant;	/* advisory */
100 	uint8_t		ld_unused;
101 } volatile lockdebug_t;
102 
103 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
104 
105 lockdebuglk_t		ld_tree_lk;
106 lockdebuglk_t		ld_sleeper_lk;
107 lockdebuglk_t		ld_spinner_lk;
108 lockdebuglk_t		ld_free_lk;
109 
110 lockdebuglist_t		ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
111 lockdebuglist_t		ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
112 lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
113 lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
114 int			ld_nfree;
115 int			ld_freeptr;
116 int			ld_recurse;
117 bool			ld_nomore;
118 lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];
119 
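/* Statically allocated batch, used before the VM system is up and running. */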
120 lockdebug_t		ld_prime[LD_BATCH];
121 
122 static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
123 				 const char *, const char *, bool);
124 static void	lockdebug_more(void);
125 static void	lockdebug_init(void);
126 
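
/*
 * Red-black tree comparison routines, keyed on lock address.  Note the
 * sign convention: both return a positive value when the first argument's
 * lock address is the lower of the two.
 */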
127 static signed int
128 ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
129 {
130 	const lockdebug_t *ld1 = (const void *)n1;
131 	const lockdebug_t *ld2 = (const void *)n2;
132 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
133 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
134 
135 	if (a < b)
136 		return 1;
137 	if (a > b)
138 		return -1;
139 	return 0;
140 }
141 
142 static signed int
143 ld_rb_compare_key(const struct rb_node *n, const void *key)
144 {
145 	const lockdebug_t *ld = (const void *)n;
146 	const uintptr_t a = (uintptr_t)ld->ld_lock;
147 	const uintptr_t b = (uintptr_t)key;
148 
149 	if (a < b)
150 		return 1;
151 	if (a > b)
152 		return -1;
153 	return 0;
154 }
155 
156 static struct rb_tree ld_rb_tree;
157 
158 static const struct rb_tree_ops ld_rb_tree_ops = {
159 	.rb_compare_nodes = ld_rb_compare_nodes,
160 	.rb_compare_key = ld_rb_compare_key,
161 };
162 
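/*
 * Lock/unlock routines for the lockdebuglk_t locks above.  Writers spin
 * for exclusive access at splhigh() and stash the previous SPL in
 * lk_oldspl; readers increment the lock word and hand the saved SPL back
 * to the caller, since there is only a single lk_oldspl slot per lock.
 */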
163 static void
164 lockdebug_lock_init(lockdebuglk_t *lk)
165 {
166 
167 	lk->lk_lock = 0;
168 }
169 
170 static void
171 lockdebug_lock(lockdebuglk_t *lk)
172 {
173 	int s;
174 
175 	s = splhigh();
176 	do {
177 		while (lk->lk_lock != 0) {
178 			SPINLOCK_SPIN_HOOK;
179 		}
180 	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
181 	lk->lk_oldspl = s;
182 	membar_enter();
183 }
184 
185 static void
186 lockdebug_unlock(lockdebuglk_t *lk)
187 {
188 	int s;
189 
190 	s = lk->lk_oldspl;
191 	membar_exit();
192 	lk->lk_lock = 0;
193 	splx(s);
194 }
195 
196 static int
197 lockdebug_lock_rd(lockdebuglk_t *lk)
198 {
199 	u_int val;
200 	int s;
201 
202 	s = splhigh();
203 	do {
204 		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
205 			SPINLOCK_SPIN_HOOK;
206 		}
207 	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
208 	membar_enter();
209 	return s;
210 }
211 
212 static void
213 lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
214 {
215 
216 	membar_exit();
217 	atomic_dec_uint(&lk->lk_lock);
218 	splx(s);
219 }
220 
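/*
 * lockdebug_lookup1:
 *
 *	Look up a lockdebug structure by lock address and, if found, return
 *	it with the matching sleeper/spinner lock held.  Returns NULL if the
 *	lock is unknown to lockdebug.
 */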
221 static inline lockdebug_t *
222 lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
223 {
224 	lockdebug_t *ld;
225 	int s;
226 
227 	s = lockdebug_lock_rd(&ld_tree_lk);
228 	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
229 	lockdebug_unlock_rd(&ld_tree_lk, s);
230 	if (ld == NULL)
231 		return NULL;
232 
233 	if ((ld->ld_flags & LD_SLEEPER) != 0)
234 		*lk = &ld_sleeper_lk;
235 	else
236 		*lk = &ld_spinner_lk;
237 
238 	lockdebug_lock(*lk);
239 	return ld;
240 }
241 
242 /*
243  * lockdebug_lookup:
244  *
245  *	Find a lockdebug structure by a pointer to a lock and return it locked.
246  */
247 static inline lockdebug_t *
248 lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
249 {
250 	lockdebug_t *ld;
251 
252 	ld = lockdebug_lookup1(lock, lk);
253 	if (ld == NULL)
254 		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
255 	return ld;
256 }
257 
258 /*
259  * lockdebug_init:
260  *
261  *	Initialize the lockdebug system.  Allocate an initial pool of
262  *	lockdebug structures before the VM system is up and running.
263  */
264 static void
265 lockdebug_init(void)
266 {
267 	lockdebug_t *ld;
268 	int i;
269 
270 	lockdebug_lock_init(&ld_tree_lk);
271 	lockdebug_lock_init(&ld_sleeper_lk);
272 	lockdebug_lock_init(&ld_spinner_lk);
273 	lockdebug_lock_init(&ld_free_lk);
274 
275 	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
276 
277 	ld = ld_prime;
278 	ld_table[0] = ld;
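	/*
	 * Note that ld_prime[0] is never added to the free list, so the
	 * primed batch contributes LD_BATCH - 1 entries.
	 */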
279 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
280 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
281 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
282 	}
283 	ld_freeptr = 1;
284 	ld_nfree = LD_BATCH - 1;
285 }
286 
287 /*
288  * lockdebug_alloc:
289  *
290  *	A lock is being initialized, so allocate an associated debug
291  *	structure.
292  */
293 bool
294 lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
295 {
296 	struct cpu_info *ci;
297 	lockdebug_t *ld;
298 	lockdebuglk_t *lk;
299 
300 	if (lo == NULL || panicstr != NULL)
301 		return false;
302 	if (ld_freeptr == 0)
303 		lockdebug_init();
304 
305 	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
306 		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
307 		/* NOTREACHED */
308 	}
309 
310 	/*
311 	 * Pinch a new debug structure.  We may recurse because we call
312 	 * kmem_alloc(), which may need to initialize new locks somewhere
313 	 * down the path.  If not recursing, we try to maintain at least
314 	 * LD_SLOP structures free, which should hopefully be enough to
315 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
316 	 * worry: the lock will simply go untracked.
317 	 */
318 	lockdebug_lock(&ld_free_lk);
319 	ci = curcpu();
320 	ci->ci_lkdebug_recurse++;
321 
322 	if (TAILQ_EMPTY(&ld_free)) {
323 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
324 			ci->ci_lkdebug_recurse--;
325 			lockdebug_unlock(&ld_free_lk);
326 			return false;
327 		}
328 		lockdebug_more();
329 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
330 		lockdebug_more();
331 
332 	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
333 		lockdebug_unlock(&ld_free_lk);
334 		return false;
335 	}
336 
337 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
338 	ld_nfree--;
339 
340 	ci->ci_lkdebug_recurse--;
341 	lockdebug_unlock(&ld_free_lk);
342 
343 	if (ld->ld_lock != NULL)
344 		panic("lockdebug_alloc: corrupt table");
345 
346 	if (lo->lo_sleeplock)
347 		lockdebug_lock(&ld_sleeper_lk);
348 	else
349 		lockdebug_lock(&ld_spinner_lk);
350 
351 	/* Initialise the structure. */
352 	ld->ld_lock = lock;
353 	ld->ld_lockops = lo;
354 	ld->ld_locked = 0;
355 	ld->ld_unlocked = 0;
356 	ld->ld_lwp = NULL;
357 	ld->ld_initaddr = initaddr;
358 
359 	lockdebug_lock(&ld_tree_lk);
360 	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
361 	lockdebug_unlock(&ld_tree_lk);
362 
363 	if (lo->lo_sleeplock) {
364 		ld->ld_flags = LD_SLEEPER;
365 		lockdebug_unlock(&ld_sleeper_lk);
366 	} else {
367 		ld->ld_flags = 0;
368 		lockdebug_unlock(&ld_spinner_lk);
369 	}
370 
371 	return true;
372 }
373 
374 /*
375  * lockdebug_free:
376  *
377  *	A lock is being destroyed, so release debugging resources.
378  */
379 void
380 lockdebug_free(volatile void *lock)
381 {
382 	lockdebug_t *ld;
383 	lockdebuglk_t *lk;
384 
385 	if (panicstr != NULL)
386 		return;
387 
388 	ld = lockdebug_lookup(lock, &lk);
389 	if (ld == NULL) {
390 		panic("lockdebug_free: destroying uninitialized lock %p",
391 		    lock);
392 		/* NOTREACHED */
394 	}
395 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
396 		lockdebug_abort1(ld, lk, __func__, "is locked", true);
397 	lockdebug_lock(&ld_tree_lk);
398 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
399 	lockdebug_unlock(&ld_tree_lk);
400 	ld->ld_lock = NULL;
401 	lockdebug_unlock(lk);
402 
403 	lockdebug_lock(&ld_free_lk);
404 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
405 	ld_nfree++;
406 	lockdebug_unlock(&ld_free_lk);
407 }
408 
409 /*
410  * lockdebug_more:
411  *
412  *	Allocate a batch of debug structures and add to the free list.
413  *	Must be called with ld_free_lk held.
414  */
415 static void
416 lockdebug_more(void)
417 {
418 	lockdebug_t *ld;
419 	void *block;
420 	int i, base, m;
421 
422 	while (ld_nfree < LD_SLOP) {
423 		lockdebug_unlock(&ld_free_lk);
424 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
425 		lockdebug_lock(&ld_free_lk);
426 
427 		if (block == NULL)
428 			return;
429 
430 		if (ld_nfree > LD_SLOP) {
431 			/* Somebody beat us to it. */
432 			lockdebug_unlock(&ld_free_lk);
433 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
434 			lockdebug_lock(&ld_free_lk);
435 			continue;
436 		}
437 
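		/*
		 * Thread the new batch onto the free and "all" lists.
		 * ld_nomore is set once the LD_MAX_LOCKS limit is reached.
		 */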
438 		base = ld_freeptr;
439 		ld_nfree += LD_BATCH;
440 		ld = block;
441 		base <<= LD_BATCH_SHIFT;
442 		m = min(LD_MAX_LOCKS, base + LD_BATCH);
443 
444 		if (m == LD_MAX_LOCKS)
445 			ld_nomore = true;
446 
447 		for (i = base; i < m; i++, ld++) {
448 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
449 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
450 		}
451 
452 		membar_producer();
453 		ld_table[ld_freeptr++] = block;
454 	}
455 }
456 
457 /*
458  * lockdebug_wantlock:
459  *
460  *	Process the preamble to a lock acquire.
461  */
462 void
463 lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
464 {
465 	struct lwp *l = curlwp;
466 	lockdebuglk_t *lk;
467 	lockdebug_t *ld;
468 	bool recurse;
469 
470 	(void)shared;
471 	recurse = false;
472 
473 	if (panicstr != NULL)
474 		return;
475 
476 	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
477 		return;
478 
479 	if ((ld->ld_flags & LD_LOCKED) != 0) {
480 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
481 			if (ld->ld_lwp == l)
482 				recurse = true;
483 		} else if (ld->ld_cpu == (uint16_t)cpu_number())
484 			recurse = true;
485 	}
486 
487 	if (cpu_intr_p()) {
488 		if ((ld->ld_flags & LD_SLEEPER) != 0)
489 			lockdebug_abort1(ld, lk, __func__,
490 			    "acquiring sleep lock from interrupt context",
491 			    true);
492 	}
493 
494 	if (shared)
495 		ld->ld_shwant++;
496 	else
497 		ld->ld_exwant++;
498 
499 	if (recurse)
500 		lockdebug_abort1(ld, lk, __func__, "locking against myself",
501 		    true);
502 
503 	lockdebug_unlock(lk);
504 }
505 
506 /*
507  * lockdebug_locked:
508  *
509  *	Process a lock acquire operation.
510  */
511 void
512 lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
513 {
514 	struct lwp *l = curlwp;
515 	lockdebuglk_t *lk;
516 	lockdebug_t *ld;
517 
518 	if (panicstr != NULL)
519 		return;
520 
521 	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
522 		return;
523 
524 	if (shared) {
525 		l->l_shlocks++;
526 		ld->ld_shares++;
527 		ld->ld_shwant--;
528 	} else {
529 		if ((ld->ld_flags & LD_LOCKED) != 0)
530 			lockdebug_abort1(ld, lk, __func__,
531 			    "already locked", true);
532 
533 		ld->ld_flags |= LD_LOCKED;
534 		ld->ld_locked = where;
535 		ld->ld_cpu = (uint16_t)cpu_number();
536 		ld->ld_lwp = l;
537 		ld->ld_exwant--;
538 
539 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
540 			l->l_exlocks++;
541 			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
542 		} else {
543 			curcpu()->ci_spin_locks2++;
544 			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
545 		}
546 	}
547 
548 	lockdebug_unlock(lk);
549 }
550 
551 /*
552  * lockdebug_unlocked:
553  *
554  *	Process a lock release operation.
555  */
556 void
557 lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
558 {
559 	struct lwp *l = curlwp;
560 	lockdebuglk_t *lk;
561 	lockdebug_t *ld;
562 
563 	if (panicstr != NULL)
564 		return;
565 
566 	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
567 		return;
568 
569 	if (shared) {
570 		if (l->l_shlocks == 0)
571 			lockdebug_abort1(ld, lk, __func__,
572 			    "no shared locks held by LWP", true);
573 		if (ld->ld_shares == 0)
574 			lockdebug_abort1(ld, lk, __func__,
575 			    "no shared holds on this lock", true);
576 		l->l_shlocks--;
577 		ld->ld_shares--;
578 	} else {
579 		if ((ld->ld_flags & LD_LOCKED) == 0)
580 			lockdebug_abort1(ld, lk, __func__, "not locked",
581 			    true);
582 
583 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
584 			if (ld->ld_lwp != curlwp)
585 				lockdebug_abort1(ld, lk, __func__,
586 				    "not held by current LWP", true);
587 			ld->ld_flags &= ~LD_LOCKED;
588 			ld->ld_unlocked = where;
589 			ld->ld_lwp = NULL;
590 			curlwp->l_exlocks--;
591 			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
592 		} else {
593 			if (ld->ld_cpu != (uint16_t)cpu_number())
594 				lockdebug_abort1(ld, lk, __func__,
595 				    "not held by current CPU", true);
596 			ld->ld_flags &= ~LD_LOCKED;
597 			ld->ld_unlocked = where;
598 			ld->ld_lwp = NULL;
599 			curcpu()->ci_spin_locks2--;
600 			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
601 		}
602 	}
603 
604 	lockdebug_unlock(lk);
605 }
606 
607 /*
608  * lockdebug_barrier:
609  *
610  *	Panic if we hold any spin lock other than the one specified, and
611  *	optionally, if we hold any sleep locks.
612  */
613 void
614 lockdebug_barrier(volatile void *spinlock, int slplocks)
615 {
616 	struct lwp *l = curlwp;
617 	lockdebug_t *ld;
618 	uint16_t cpuno;
619 	int s;
620 
621 	if (panicstr != NULL)
622 		return;
623 
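	/* Stay on this CPU while we examine its spin lock state. */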
624 	crit_enter();
625 
626 	if (curcpu()->ci_spin_locks2 != 0) {
627 		cpuno = (uint16_t)cpu_number();
628 
629 		s = lockdebug_lock_rd(&ld_spinner_lk);
630 		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
631 			if (ld->ld_lock == spinlock) {
632 				if (ld->ld_cpu != cpuno)
633 					lockdebug_abort1(ld, &ld_spinner_lk,
634 					    __func__,
635 					    "not held by current CPU", true);
636 				continue;
637 			}
638 			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
639 				lockdebug_abort1(ld, &ld_spinner_lk,
640 				    __func__, "spin lock held", true);
641 		}
642 		lockdebug_unlock_rd(&ld_spinner_lk, s);
643 	}
644 
645 	if (!slplocks) {
646 		if (l->l_exlocks != 0) {
647 			s = lockdebug_lock_rd(&ld_sleeper_lk);
648 			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
649 				if (ld->ld_lwp == l)
650 					lockdebug_abort1(ld, &ld_sleeper_lk,
651 					    __func__, "sleep lock held", true);
652 			}
653 			lockdebug_unlock_rd(&ld_sleeper_lk, s);
654 		}
655 		if (l->l_shlocks != 0)
656 			panic("lockdebug_barrier: holding %d shared locks",
657 			    l->l_shlocks);
658 	}
659 
660 	crit_exit();
661 }
662 
663 /*
664  * lockdebug_mem_check:
665  *
666  *	Check for in-use locks within a memory region that is
667  *	being freed.
668  */
669 void
670 lockdebug_mem_check(const char *func, void *base, size_t sz)
671 {
672 	lockdebug_t *ld;
673 	lockdebuglk_t *lk;
674 	int s;
675 
676 	s = lockdebug_lock_rd(&ld_tree_lk);
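	/*
	 * Find the first lock at or above 'base'; if its address falls
	 * within [base, base + sz), the region being freed still contains
	 * a registered lock.
	 */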
677 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
678 	if (ld != NULL) {
679 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
680 
681 		if ((uintptr_t)base > lock)
682 			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
683 			    __func__, ld, base, sz);
684 		if (lock >= (uintptr_t)base + sz)
685 			ld = NULL;
686 	}
687 	lockdebug_unlock_rd(&ld_tree_lk, s);
688 	if (ld == NULL)
689 		return;
690 
691 	if ((ld->ld_flags & LD_SLEEPER) != 0)
692 		lk = &ld_sleeper_lk;
693 	else
694 		lk = &ld_spinner_lk;
695 
696 	lockdebug_lock(lk);
697 	lockdebug_abort1(ld, lk, func,
698 	    "allocation contains active lock", !cold);
699 	return;
700 }
701 
702 /*
703  * lockdebug_dump:
704  *
705  *	Dump information about a lock on panic, or for DDB.
706  */
707 static void
708 lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
709 {
710 	int sleeper = (ld->ld_flags & LD_SLEEPER);
711 
712 	(*pr)(
713 	    "lock address : %#018lx type     : %18s\n"
714 	    "shared holds : %18u exclusive: %18u\n"
715 	    "shares wanted: %18u exclusive: %18u\n"
716 	    "current cpu  : %18u last held: %18u\n"
717 	    "current lwp  : %#018lx last held: %#018lx\n"
718 	    "last locked  : %#018lx unlocked : %#018lx\n"
719 	    "initialized  : %#018lx\n",
720 	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
721 	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
722 	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
723 	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
724 	    (long)curlwp, (long)ld->ld_lwp,
725 	    (long)ld->ld_locked, (long)ld->ld_unlocked,
726 	    (long)ld->ld_initaddr);
727 
728 	if (ld->ld_lockops->lo_dump != NULL)
729 		(*ld->ld_lockops->lo_dump)(ld->ld_lock);
730 
731 	if (sleeper) {
732 		(*pr)("\n");
733 		turnstile_print(ld->ld_lock, pr);
734 	}
735 }
736 
737 /*
738  * lockdebug_abort1:
739  *
740  *	An error has been trapped - dump the lock record and optionally
741  *	call panic().
741  */
742 static void
743 lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
744 		 const char *msg, bool dopanic)
745 {
746 
747 	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
748 	    func, msg);
749 	lockdebug_dump(ld, printf_nolog);
750 	lockdebug_unlock(lk);
751 	printf_nolog("\n");
752 	if (dopanic)
753 		panic("LOCKDEBUG");
754 }
755 
756 #endif	/* LOCKDEBUG */
757 
758 /*
759  * lockdebug_lock_print:
760  *
761  *	Handle the DDB 'show lock' command.
762  */
763 #ifdef DDB
764 void
765 lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
766 {
767 #ifdef LOCKDEBUG
768 	lockdebug_t *ld;
769 
770 	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
771 		if (ld->ld_lock == addr) {
772 			lockdebug_dump(ld, pr);
773 			return;
774 		}
775 	}
776 	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
777 #else
778 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
779 #endif	/* LOCKDEBUG */
780 }
781 #endif	/* DDB */
782 
783 /*
784  * lockdebug_abort:
785  *
786  *	An error has been trapped - dump lock info and call panic().
787  */
788 void
789 lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
790 		const char *msg)
791 {
792 #ifdef LOCKDEBUG
793 	lockdebug_t *ld;
794 	lockdebuglk_t *lk;
795 
796 	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
797 		lockdebug_abort1(ld, lk, func, msg, true);
798 		/* NOTREACHED */
799 	}
800 #endif	/* LOCKDEBUG */
801 
802 	printf_nolog("%s error: %s: %s\n\n"
803 	    "lock address : %#018lx\n"
804 	    "current cpu  : %18d\n"
805 	    "current lwp  : %#018lx\n",
806 	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
807 	    (long)curlwp);
808 
809 	(*ops->lo_dump)(lock);
810 
811 	printf_nolog("\n");
812 	panic("lock error");
813 }
814