/*	$NetBSD: subr_lockdebug.c,v 1.52 2014/11/24 02:36:31 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
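
/*
 * Rough life-cycle sketch (illustrative comment only, not calls made
 * from this file; real lock primitives typically go through the
 * LOCKDEBUG_*() wrapper macros in <sys/lockdebug.h>, and the actual
 * call sites live in files such as kern_mutex.c and kern_rwlock.c):
 *
 *	lockdebug_alloc(lock, ops, initaddr)		register a new lock
 *	lockdebug_wantlock(lock, where, shared)		about to acquire
 *	lockdebug_locked(lock, cvlock, where, shared)	acquired
 *	lockdebug_unlocked(lock, where, shared)		about to release
 *	lockdebug_free(lock)				lock being destroyed
 *
 * If lockdebug_alloc() returns false the lock simply runs undebugged;
 * every entry point also bails out early once a panic is in progress.
 */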

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.52 2014/11/24 02:36:31 christos Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16
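
/*
 * Sizing note (illustrative arithmetic only): LD_BATCH_SHIFT of 9
 * gives LD_BATCH = 1 << 9 = 512 lockdebug structures per batch.  The
 * static ld_prime[] array below provides the first batch before the
 * allocator is usable, and lockdebug_more() then grows the pool one
 * 512-entry batch at a time until LD_MAX_LOCKS (1048576 structures,
 * i.e. 2048 batches) is reached, at which point ld_nomore is set.
 */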

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
				 const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL) {
		panic("lockdebug_lookup: uninitialized lock "
		    "(lock=%p, from=%08"PRIxPTR")", lock, where);
	}
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table ld %p", ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
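
/*
 * Illustrative sketch only (the names "example_lock", "example_lockops"
 * and "example_dump" are made up for this comment): a primitive
 * registers itself by passing a lockops_t describing its name, type
 * and dump routine, typically from its own init function:
 *
 *	static lockops_t example_lockops = {
 *		.lo_name = "example",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = example_dump,
 *	};
 *
 *	lockdebug_alloc(&example_lock, &example_lockops,
 *	    (uintptr_t)__builtin_return_address(0));
 *
 * Passing the caller's return address as initaddr is what lets
 * lockdebug_dump() report where the lock was initialized.
 */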

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("lockdebug_free: destroying uninitialized object %p"
		    "(ld_lock=%p)", lock, ld);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
		 int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
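
/*
 * Taken together, lockdebug_wantlock(), lockdebug_locked() and
 * lockdebug_unlocked() above let a primitive have every acquire and
 * release checked.  A rough sketch of what gets caught (see the abort
 * messages above for the authoritative list): acquiring a sleep lock
 * from interrupt context, locking against oneself, releasing a lock
 * that is not held, and releasing a lock held by another LWP or CPU.
 * The "where" argument is normally the caller's return address, e.g.
 * (uintptr_t)__builtin_return_address(0), so that errors can be traced
 * back to a call site.
 */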

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(ld, printf);
		}
		panic("%s: holding %d shared locks", __func__, l->l_shlocks);
	}
}
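
/*
 * Usage sketch (illustrative only; "lk" is a made-up name): code that
 * is about to sleep or otherwise must not be holding locks can assert
 * that with, e.g.:
 *
 *	lockdebug_barrier(NULL, 0);	no spin locks, no sleep locks
 *	lockdebug_barrier(lk, 1);	"lk" is the only spin lock held,
 *					sleep locks are allowed
 *
 * Callers normally reach this through the LOCKDEBUG_BARRIER() wrapper
 * so the check disappears in kernels built without LOCKDEBUG.
 */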

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
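
/*
 * Usage sketch (illustrative only; "ptr" and "size" are made-up
 * names): an allocator's free path can pass the region it is about to
 * recycle, so that freeing memory which still contains an initialized
 * lock is caught here rather than surfacing later as corruption:
 *
 *	lockdebug_mem_check(__func__, ptr, size);
 *	... then actually free ptr ...
 *
 * In-tree callers typically use the LOCKDEBUG_MEM_CHECK() wrapper.
 */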

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s: %s", ld->ld_lockops->lo_name,
		    func, msg);
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock,
			(uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}
867