/*	$NetBSD: subr_lockdebug.c,v 1.58 2017/09/16 23:55:33 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.58 2017/09/16 23:55:33 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>
unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

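/*
 * Note: with LD_BATCH_SHIFT = 9, each batch holds LD_BATCH = 512
 * lockdebug structures, so the LD_MAX_LOCKS = 1048576 cap corresponds
 * to 1048576 / 512 = 2048 batches.
 */
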
typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

/*
 * lockdebug_lookup1:
 *
 *	Look up a lockdebug structure by lock address.  On success,
 *	return it with its spinlock held; the caller must release it.
 */
static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

/*
 * lockdebug_lock_cpus:
 *
 *	Take the per-CPU lookup locks on all CPUs, excluding all
 *	concurrent lookups while the rb-tree is modified.
 */
static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_unlock_cpus:
 *
 *	Release the per-CPU lookup locks taken above.
 */
static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: the lock will simply go untracked.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
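
/*
 * Example (illustrative sketch, not part of the kernel): a lock
 * primitive's init path registers with LOCKDEBUG roughly as below.
 * The lockops_t fields match how they are used in this file;
 * "mylock", "ml_lock" and "mylock_dump" are hypothetical names, and
 * in-tree callers normally go through the wrapper macros in
 * <sys/lockdebug.h> rather than calling lockdebug_alloc() directly.
 *
 *	static lockops_t mylock_lockops = {
 *		.lo_name = "mylock",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = mylock_dump,
 *	};
 *
 *	bool dodebug = lockdebug_alloc(__func__, __LINE__,
 *	    &mylock->ml_lock, &mylock_lockops,
 *	    (uintptr_t)__builtin_return_address(0));
 *
 * The result records whether a debug structure was obtained, so the
 * destroy path knows whether to call lockdebug_free().
 */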

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("%s,%zu: destroying uninitialized object %p", func,
		    line, lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "is locked or in use",
		    true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}
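
/*
 * Example (sketch, hypothetical names continued from above): the
 * matching destroy path releases the debug structure only if the
 * init path obtained one:
 *
 *	if (dodebug)
 *		lockdebug_free(__func__, __LINE__, &mylock->ml_lock);
 */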

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_freeptr++;	/* consume this batch's index, so the
				   LD_MAX_LOCKS cap can take effect */
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.  The "shared"
 *	parameter controls which ld_{ex,sh}want counter is
 *	updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared > 0)
		ld->ld_shwant++;
	else if (shared == 0)
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
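
/*
 * Example (sketch, hypothetical names): an exclusive acquire path
 * announces intent before it spins or sleeps, then reports success;
 * shared == 0 selects the ld_exwant counter above:
 *
 *	lockdebug_wantlock(__func__, __LINE__, &mylock->ml_lock,
 *	    (uintptr_t)__builtin_return_address(0), 0);
 *	... acquire the underlying lock ...
 *	lockdebug_locked(__func__, __LINE__, &mylock->ml_lock, NULL,
 *	    (uintptr_t)__builtin_return_address(0), 0);
 */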

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(func, line, ld, s,
			    "multiple locks used with condition variable",
			    true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
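
/*
 * Example (sketch): for condition variables the "cvlock" argument
 * carries the interlocking mutex, which lets the code above detect a
 * CV being used with more than one mutex.  A cv_wait()-style path
 * might record the hold as follows ("cv" and "mtx" are hypothetical):
 *
 *	lockdebug_locked(__func__, __LINE__, cv, mtx,
 *	    (uintptr_t)__builtin_return_address(0), 0);
 */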

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
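
/*
 * Example (sketch, hypothetical names): the release path reports the
 * unlock before the lock is actually dropped, again with shared == 0
 * for an exclusive hold:
 *
 *	lockdebug_unlocked(__func__, __LINE__, &mylock->ml_lock,
 *	    (uintptr_t)__builtin_return_address(0), 0);
 *	... release the underlying lock ...
 */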

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
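
/*
 * Example (sketch): a cv_broadcast()-style path calls in while still
 * holding the interlock, which is exactly what the check above
 * verifies ("cv" and "mtx" are hypothetical):
 *
 *	KASSERT(mutex_owned(mtx));
 *	lockdebug_wakeup(__func__, __LINE__, cv,
 *	    (uintptr_t)__builtin_return_address(0));
 */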

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified,
 *	and, unless "slplocks" is nonzero, if we hold any sleep locks
 *	or shared locks.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}
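
/*
 * Example (sketch): code about to sleep can assert that it holds no
 * spin locks at all, while still allowing sleep locks:
 *
 *	lockdebug_barrier(__func__, __LINE__, NULL, 1);
 *
 * Passing a specific lock as "spinlock" exempts exactly that one
 * lock from the check.
 */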

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
			    func, line, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
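
/*
 * Example (sketch): an allocator's free path can verify that a region
 * being recycled no longer contains an initialized lock ("ptr" and
 * "size" are hypothetical):
 *
 *	lockdebug_mem_check(__func__, __LINE__, ptr, size);
 *
 * This aborts if the rb-tree still holds a lock whose address falls
 * inside [ptr, ptr + size).
 */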

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */
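
/*
 * Example (DDB session sketch): with both DDB and LOCKDEBUG
 * configured, "show lock <address>" prints the record for one lock
 * via the function above; a null address dumps every record on
 * ld_all.
 */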

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
			(uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s,%zu: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, line, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}