/*	$NetBSD: subr_lockdebug.c,v 1.39 2008/11/07 19:50:00 cegger Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
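
/*
 * Editorial note (illustrative, not part of the original source): a lock
 * primitive drives this code through the hooks defined below, roughly in
 * the order
 *
 *	lockdebug_alloc()	at lock initialization
 *	lockdebug_wantlock()	before attempting an acquire
 *	lockdebug_locked()	after a successful acquire
 *	lockdebug_unlocked()	on release
 *	lockdebug_free()	at lock destruction
 *
 * with lockdebug_barrier() and lockdebug_mem_check() available as
 * consistency checks for callers and for memory that is being freed.
 */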

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.39 2008/11/07 19:50:00 cegger Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rb.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
				 const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);

static signed int
ld_rbto_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rbto_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
};
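
/*
 * Editorial note: both comparators above use the same inverted sign
 * convention, returning 1 when the first lock address is the lower one.
 * What matters to the rb_tree code is that the node and key comparisons
 * agree with each other, which they do.
 */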

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p, from=%08"PRIxPTR")", lock, where);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, don't
	 * worry: we just return false and the lock goes untracked.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table");
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
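
/*
 * Illustrative sketch (not from the original source): how a hypothetical
 * lock primitive's init path might register itself with lockdebug.  The
 * "example_mutex_init" and "example_lockops" names are invented for the
 * example; real callers normally go through the LOCKDEBUG_* wrapper
 * macros in <sys/lockdebug.h>.
 *
 *	static lockops_t example_lockops = {
 *		.lo_name = "example_mutex",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = NULL,
 *	};
 *
 *	void
 *	example_mutex_init(volatile void *lock)
 *	{
 *		(void)lockdebug_alloc(lock, &example_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 *
 * If lockdebug_alloc() returns false (table exhausted, recursion, or a
 * panic in progress), the lock simply goes untracked.
 */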

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		/* No record for this lock; ld is NULL, do not touch it. */
		__cpu_simple_unlock(&ld_mod_lk);
		panic("lockdebug_free: destroying uninitialized object %p",
		    lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}
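
/*
 * Editorial note on the index arithmetic above: with LD_BATCH_SHIFT == 9,
 * LD_BATCH is 512, so base = ld_freeptr << LD_BATCH_SHIFT converts a
 * batch number into a structure index.  For example, with ld_freeptr == 1
 * (the state left by lockdebug_init()), base is 512 and
 * m = min(1048576, 1024), so the batch covers indices 512..1023.
 * Allocation stops for good once m reaches LD_MAX_LOCKS and ld_nomore is
 * set.
 */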

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
		   bool trylock)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l && !(shared && trylock))
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
		 int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_number();
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l)
			ld->ld_lwp = NULL;
		if (ld->ld_cpu == (uint16_t)cpu_number())
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number()) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and
 *	optionally if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		panic("lockdebug_barrier: holding %d shared locks",
		    l->l_shlocks);
	}
}
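
/*
 * Illustrative sketch (not from the original source): a typical use of
 * lockdebug_barrier() is to assert that no spin locks are held before a
 * code path that may sleep, e.g.
 *
 *	lockdebug_barrier(NULL, 1);
 *
 * which tolerates sleep locks but aborts on any held spin lock.  A caller
 * that legitimately holds one particular spin lock ("example_spinlock" is
 * a hypothetical name) can exempt it:
 *
 *	lockdebug_barrier(&example_spinlock, 1);
 *
 * Passing 0 for slplocks additionally forbids sleep locks and, via the
 * final check, any shared holds.
 */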

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
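
/*
 * Illustrative sketch (not from the original source): an allocator's free
 * path can use this hook to catch freeing memory that still contains an
 * initialized lock.  "example_free" and its arguments are hypothetical:
 *
 *	void
 *	example_free(void *ptr, size_t size)
 *	{
 *		lockdebug_mem_check(__func__, ptr, size);
 *		// ... hand the memory back to the allocator ...
 *	}
 *
 * If the region does contain a live lock, lockdebug_abort1() reports
 * "allocation contains active lock" and panics unless the system is
 * still cold.
 */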

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked  : %#018lx unlocked : %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    (long)ld->ld_locked, (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock,
			(uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic, where we will `rendezvous' with other CPUs if the
	 * machine is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
		    (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}
847