1 /*	$NetBSD: subr_lockdebug.c,v 1.77 2020/05/15 13:09:02 maxv Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Basic lock debugging code shared among lock primitives.
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.77 2020/05/15 13:09:02 maxv Exp $");
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_ddb.h"
41 #endif
42 
43 #include <sys/param.h>
44 #include <sys/proc.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/kmem.h>
48 #include <sys/lockdebug.h>
49 #include <sys/sleepq.h>
50 #include <sys/cpu.h>
51 #include <sys/atomic.h>
52 #include <sys/lock.h>
53 #include <sys/rbtree.h>
54 #include <sys/ksyms.h>
55 #include <sys/kcov.h>
56 
57 #include <machine/lock.h>
58 
59 unsigned int		ld_panic;
60 
61 #ifdef LOCKDEBUG
62 
63 #ifdef __ia64__
64 #define	LD_BATCH_SHIFT	16
65 #else
66 #define	LD_BATCH_SHIFT	9
67 #endif
68 #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
69 #define	LD_BATCH_MASK	(LD_BATCH - 1)
70 #define	LD_MAX_LOCKS	1048576
71 #define	LD_SLOP		16
72 
73 #define	LD_LOCKED	0x01
74 #define	LD_SLEEPER	0x02
75 
76 #define	LD_WRITE_LOCK	0x80000000
77 
78 typedef struct lockdebug {
79 	struct rb_node	ld_rb_node;
80 	__cpu_simple_lock_t ld_spinlock;
81 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
82 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
83 	volatile void	*ld_lock;
84 	lockops_t	*ld_lockops;
85 	struct lwp	*ld_lwp;
86 	uintptr_t	ld_locked;
87 	uintptr_t	ld_unlocked;
88 	uintptr_t	ld_initaddr;
89 	uint16_t	ld_shares;
90 	uint16_t	ld_cpu;
91 	uint8_t		ld_flags;
92 	uint8_t		ld_shwant;	/* advisory */
93 	uint8_t		ld_exwant;	/* advisory */
94 	uint8_t		ld_unused;
95 } volatile lockdebug_t;
96 
97 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
98 
99 __cpu_simple_lock_t	ld_mod_lk;
100 lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
101 #ifdef _KERNEL
102 lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
103 #else
104 extern lockdebuglist_t	ld_all;
105 #define cpu_name(a)	"?"
106 #define cpu_index(a)	-1
107 #define curlwp		NULL
108 #endif /* _KERNEL */
109 int			ld_nfree;
110 int			ld_freeptr;
111 int			ld_recurse;
112 bool			ld_nomore;
113 lockdebug_t		ld_prime[LD_BATCH];
114 
115 #ifdef _KERNEL
116 static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
117     const char *, bool);
118 static int	lockdebug_more(int);
119 static void	lockdebug_init(void);
120 static void	lockdebug_dump(lwp_t *, lockdebug_t *,
121     void (*)(const char *, ...)
122     __printflike(1, 2));
123 
124 static signed int
125 ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
126 {
127 	const lockdebug_t *ld1 = n1;
128 	const lockdebug_t *ld2 = n2;
129 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
130 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
131 
132 	if (a < b)
133 		return -1;
134 	if (a > b)
135 		return 1;
136 	return 0;
137 }
138 
139 static signed int
140 ld_rbto_compare_key(void *ctx, const void *n, const void *key)
141 {
142 	const lockdebug_t *ld = n;
143 	const uintptr_t a = (uintptr_t)ld->ld_lock;
144 	const uintptr_t b = (uintptr_t)key;
145 
146 	if (a < b)
147 		return -1;
148 	if (a > b)
149 		return 1;
150 	return 0;
151 }
152 
153 static rb_tree_t ld_rb_tree;
154 
155 static const rb_tree_ops_t ld_rb_tree_ops = {
156 	.rbto_compare_nodes = ld_rbto_compare_nodes,
157 	.rbto_compare_key = ld_rbto_compare_key,
158 	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
159 	.rbto_context = NULL
160 };
161 
162 static inline lockdebug_t *
163 lockdebug_lookup1(const volatile void *lock)
164 {
165 	lockdebug_t *ld;
166 	struct cpu_info *ci;
167 
168 	ci = curcpu();
169 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
170 	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
171 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
172 	if (ld == NULL) {
173 		return NULL;
174 	}
175 	__cpu_simple_lock(&ld->ld_spinlock);
176 
177 	return ld;
178 }
179 
180 static void
181 lockdebug_lock_cpus(void)
182 {
183 	CPU_INFO_ITERATOR cii;
184 	struct cpu_info *ci;
185 
186 	for (CPU_INFO_FOREACH(cii, ci)) {
187 		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
188 	}
189 }
190 
191 static void
192 lockdebug_unlock_cpus(void)
193 {
194 	CPU_INFO_ITERATOR cii;
195 	struct cpu_info *ci;
196 
197 	for (CPU_INFO_FOREACH(cii, ci)) {
198 		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
199 	}
200 }
201 
202 /*
203  * lockdebug_lookup:
204  *
205  *	Find a lockdebug structure by a pointer to a lock and return it locked.
206  */
207 static inline lockdebug_t *
208 lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
209     uintptr_t where)
210 {
211 	lockdebug_t *ld;
212 
213 	kcov_silence_enter();
214 	ld = lockdebug_lookup1(lock);
215 	kcov_silence_leave();
216 
217 	if (__predict_false(ld == NULL)) {
218 		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
219 		    PRIxPTR ")", func, line, lock, where);
220 	}
221 	return ld;
222 }
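
/*
 * Illustrative sketch (not part of this file): the contract that callers
 * in this file follow.  lockdebug_lookup() is called at splhigh() and
 * returns the lockdebug_t with ld_spinlock held; the caller must release
 * ld_spinlock and restore the SPL itself.  The lock pointer below is
 * hypothetical.
 */
#if 0
static void
example_lookup_usage(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	s = splhigh();
	ld = lockdebug_lookup(__func__, __LINE__, lock,
	    (uintptr_t)__builtin_return_address(0));
	if (ld != NULL) {
		/* ... inspect or update *ld here ... */
		__cpu_simple_unlock(&ld->ld_spinlock);
	}
	splx(s);
}
#endif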
223 
224 /*
225  * lockdebug_init:
226  *
227  *	Initialize the lockdebug system.  Allocate an initial pool of
228  *	lockdebug structures before the VM system is up and running.
229  */
230 static void
231 lockdebug_init(void)
232 {
233 	lockdebug_t *ld;
234 	int i;
235 
236 	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
237 	TAILQ_INIT(&curlwp->l_ld_locks);
238 	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
239 	__cpu_simple_lock_init(&ld_mod_lk);
240 
241 	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
242 
243 	ld = ld_prime;
244 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
245 		__cpu_simple_lock_init(&ld->ld_spinlock);
246 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
247 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
248 	}
249 	ld_freeptr = 1;
250 	ld_nfree = LD_BATCH - 1;
251 }
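
/*
 * Worked example (illustrative): on non-ia64 platforms LD_BATCH_SHIFT is
 * 9, so LD_BATCH = 1 << 9 = 512.  The static ld_prime[] batch above is
 * threaded onto the free list starting at index 1 (slot 0 is skipped),
 * which is why ld_nfree ends up as LD_BATCH - 1 = 511 and ld_freeptr as
 * 1.  Further batches come from lockdebug_more(), up to LD_MAX_LOCKS.
 */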
252 
253 /*
254  * lockdebug_alloc:
255  *
256  *	A lock is being initialized, so allocate an associated debug
257  *	structure.
258  */
259 bool
260 lockdebug_alloc(const char *func, size_t line, volatile void *lock,
261     lockops_t *lo, uintptr_t initaddr)
262 {
263 	struct cpu_info *ci;
264 	lockdebug_t *ld;
265 	int s;
266 
267 	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
268 		return false;
269 	if (__predict_false(ld_freeptr == 0))
270 		lockdebug_init();
271 
272 	s = splhigh();
273 	__cpu_simple_lock(&ld_mod_lk);
274 	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
275 		__cpu_simple_unlock(&ld_mod_lk);
276 		lockdebug_abort1(func, line, ld, s, "already initialized",
277 		    true);
278 		return false;
279 	}
280 
281 	/*
282 	 * Pinch a new debug structure.  We may recurse because we call
283 	 * kmem_alloc(), which may need to initialize new locks somewhere
284 	 * down the path.  If not recursing, we try to maintain at least
285 	 * LD_SLOP structures free, which should hopefully be enough to
286 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
287 	 * worry: we'll just mark the lock as not having an ID.
288 	 */
289 	ci = curcpu();
290 	ci->ci_lkdebug_recurse++;
291 	if (TAILQ_EMPTY(&ld_free)) {
292 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
293 			ci->ci_lkdebug_recurse--;
294 			__cpu_simple_unlock(&ld_mod_lk);
295 			splx(s);
296 			return false;
297 		}
298 		s = lockdebug_more(s);
299 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
300 		s = lockdebug_more(s);
301 	}
302 	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
303 		__cpu_simple_unlock(&ld_mod_lk);
304 		splx(s);
305 		return false;
306 	}
307 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
308 	ld_nfree--;
309 	ci->ci_lkdebug_recurse--;
310 
311 	if (__predict_false(ld->ld_lock != NULL)) {
312 		panic("%s,%zu: corrupt table ld %p", func, line, ld);
313 	}
314 
315 	/* Initialise the structure. */
316 	ld->ld_lock = lock;
317 	ld->ld_lockops = lo;
318 	ld->ld_locked = 0;
319 	ld->ld_unlocked = 0;
320 	ld->ld_lwp = NULL;
321 	ld->ld_initaddr = initaddr;
322 	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
323 	lockdebug_lock_cpus();
324 	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
325 	lockdebug_unlock_cpus();
326 	__cpu_simple_unlock(&ld_mod_lk);
327 
328 	splx(s);
329 	return true;
330 }
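
/*
 * Illustrative sketch (not part of this file): how a lock primitive might
 * register a new lock with lockdebug_alloc().  The lockops_t initializer
 * and the "example" primitive are hypothetical; only the lo_name, lo_type
 * and lo_dump members are relied upon by this file.
 */
#if 0
static lockops_t example_lockops = {
	.lo_name = "Example sleep lock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

static void
example_lock_init(volatile void *lock)
{
	/* A false return only means no debug structure could be attached. */
	(void)lockdebug_alloc(__func__, __LINE__, lock, &example_lockops,
	    (uintptr_t)__builtin_return_address(0));
}
#endif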
331 
332 /*
333  * lockdebug_free:
334  *
335  *	A lock is being destroyed, so release debugging resources.
336  */
337 void
338 lockdebug_free(const char *func, size_t line, volatile void *lock)
339 {
340 	lockdebug_t *ld;
341 	int s;
342 
343 	if (__predict_false(panicstr != NULL || ld_panic))
344 		return;
345 
346 	s = splhigh();
347 	__cpu_simple_lock(&ld_mod_lk);
348 	ld = lockdebug_lookup(func, line, lock,
349 	    (uintptr_t) __builtin_return_address(0));
350 	if (__predict_false(ld == NULL)) {
351 		__cpu_simple_unlock(&ld_mod_lk);
352 		panic("%s,%zu: destroying uninitialized object %p"
353 		    " (ld_lock=%p)", func, line, lock, ld->ld_lock);
354 		return;
355 	}
356 	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
357 	    ld->ld_shares != 0)) {
358 		__cpu_simple_unlock(&ld_mod_lk);
359 		lockdebug_abort1(func, line, ld, s, "is locked or in use",
360 		    true);
361 		return;
362 	}
363 	lockdebug_lock_cpus();
364 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
365 	lockdebug_unlock_cpus();
366 	ld->ld_lock = NULL;
367 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
368 	ld_nfree++;
369 	__cpu_simple_unlock(&ld->ld_spinlock);
370 	__cpu_simple_unlock(&ld_mod_lk);
371 	splx(s);
372 }
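
/*
 * Illustrative sketch (not part of this file): the matching destroy path
 * for the hypothetical primitive sketched above.  lockdebug_free() aborts
 * if the lock is still held or has shared holds at destroy time.
 */
#if 0
static void
example_lock_destroy(volatile void *lock)
{
	lockdebug_free(__func__, __LINE__, lock);
}
#endif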
373 
374 /*
375  * lockdebug_more:
376  *
377  *	Allocate a batch of debug structures and add to the free list.
378  *	Must be called with ld_mod_lk held.
379  */
380 static int
381 lockdebug_more(int s)
382 {
383 	lockdebug_t *ld;
384 	void *block;
385 	int i, base, m;
386 
387 	/*
388 	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
389 	 * deadlock, because we don't know which locks the caller holds.
390 	 */
391 	if (cpu_intr_p() || cpu_softintr_p()) {
392 		return s;
393 	}
394 
395 	while (ld_nfree < LD_SLOP) {
396 		__cpu_simple_unlock(&ld_mod_lk);
397 		splx(s);
398 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
399 		s = splhigh();
400 		__cpu_simple_lock(&ld_mod_lk);
401 
402 		if (ld_nfree > LD_SLOP) {
403 			/* Somebody beat us to it. */
404 			__cpu_simple_unlock(&ld_mod_lk);
405 			splx(s);
406 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
407 			s = splhigh();
408 			__cpu_simple_lock(&ld_mod_lk);
409 			continue;
410 		}
411 
412 		base = ld_freeptr;
413 		ld_nfree += LD_BATCH;
414 		ld = block;
415 		base <<= LD_BATCH_SHIFT;
416 		m = uimin(LD_MAX_LOCKS, base + LD_BATCH);
417 
418 		if (m == LD_MAX_LOCKS)
419 			ld_nomore = true;
420 
421 		for (i = base; i < m; i++, ld++) {
422 			__cpu_simple_lock_init(&ld->ld_spinlock);
423 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
424 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
425 		}
426 
427 		membar_producer();
428 	}
429 
430 	return s;
431 }
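
/*
 * Illustrative note: kmem_zalloc() may sleep, so ld_mod_lk and the raised
 * SPL are dropped around it above.  Each successful pass adds one batch,
 * i.e. a single allocation of LD_BATCH * sizeof(lockdebug_t) bytes (512
 * structures per batch on most platforms, see LD_BATCH_SHIFT).
 */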
432 
433 /*
434  * lockdebug_wantlock:
435  *
436  *	Process the preamble to a lock acquire.  The "shared"
437  *	parameter controls which ld_{ex,sh}want counter is
438  *	updated; a negative value of shared updates neither.
439  */
440 void
441 lockdebug_wantlock(const char *func, size_t line,
442     const volatile void *lock, uintptr_t where, int shared)
443 {
444 	struct lwp *l = curlwp;
445 	lockdebug_t *ld;
446 	bool recurse;
447 	int s;
448 
449 	(void)shared;
450 	recurse = false;
451 
452 	if (__predict_false(panicstr != NULL || ld_panic))
453 		return;
454 
455 	s = splhigh();
456 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
457 		splx(s);
458 		return;
459 	}
460 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
461 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
462 			if (ld->ld_lwp == l)
463 				recurse = true;
464 		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
465 			recurse = true;
466 	}
467 	if (cpu_intr_p()) {
468 		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
469 			lockdebug_abort1(func, line, ld, s,
470 			    "acquiring sleep lock from interrupt context",
471 			    true);
472 			return;
473 		}
474 	}
475 	if (shared > 0)
476 		ld->ld_shwant++;
477 	else if (shared == 0)
478 		ld->ld_exwant++;
479 	if (__predict_false(recurse)) {
480 		lockdebug_abort1(func, line, ld, s, "locking against myself",
481 		    true);
482 		return;
483 	}
484 	if (l->l_ld_wanted == NULL) {
485 		l->l_ld_wanted = ld;
486 	}
487 	__cpu_simple_unlock(&ld->ld_spinlock);
488 	splx(s);
489 }
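
/*
 * Illustrative sketch (not part of this file): the meaning of the
 * "shared" argument to lockdebug_wantlock().  The lock pointer and
 * return-address value are hypothetical.
 */
#if 0
static void
example_wantlock_usage(const volatile void *lock, uintptr_t where)
{
	/* About to take a shared (read) hold: bumps ld_shwant. */
	lockdebug_wantlock(__func__, __LINE__, lock, where, 1);

	/* About to take an exclusive (write) hold: bumps ld_exwant. */
	lockdebug_wantlock(__func__, __LINE__, lock, where, 0);

	/* Record intent only; neither advisory counter is updated. */
	lockdebug_wantlock(__func__, __LINE__, lock, where, -1);
}
#endif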
490 
491 /*
492  * lockdebug_locked:
493  *
494  *	Process a lock acquire operation.
495  */
496 void
497 lockdebug_locked(const char *func, size_t line,
498     volatile void *lock, void *cvlock, uintptr_t where, int shared)
499 {
500 	struct lwp *l = curlwp;
501 	lockdebug_t *ld;
502 	int s;
503 
504 	if (__predict_false(panicstr != NULL || ld_panic))
505 		return;
506 
507 	s = splhigh();
508 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
509 		splx(s);
510 		return;
511 	}
512 	if (shared) {
513 		l->l_shlocks++;
514 		ld->ld_locked = where;
515 		ld->ld_shares++;
516 		ld->ld_shwant--;
517 	} else {
518 		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
519 			lockdebug_abort1(func, line, ld, s, "already locked",
520 			    true);
521 			return;
522 		}
523 		ld->ld_flags |= LD_LOCKED;
524 		ld->ld_locked = where;
525 		ld->ld_exwant--;
526 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
527 			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
528 		} else {
529 			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
530 			    ld, ld_chain);
531 		}
532 	}
533 	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
534 	ld->ld_lwp = l;
535 	__cpu_simple_unlock(&ld->ld_spinlock);
536 	if (l->l_ld_wanted == ld) {
537 		l->l_ld_wanted = NULL;
538 	}
539 	splx(s);
540 }
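
/*
 * Illustrative sketch (not part of this file): a full exclusive acquire as
 * a lock primitive would report it.  For a sleep lock the record is
 * chained onto curlwp's l_ld_locks above; for a spin lock it goes onto the
 * owning CPU's cpu_ld_locks.  The cvlock argument is passed as NULL in
 * this sketch.
 */
#if 0
static void
example_exclusive_acquire(volatile void *lock)
{
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	lockdebug_wantlock(__func__, __LINE__, lock, where, 0);
	/* ... the primitive actually acquires the lock here ... */
	lockdebug_locked(__func__, __LINE__, lock, NULL, where, 0);
}
#endif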
541 
542 /*
543  * lockdebug_unlocked:
544  *
545  *	Process a lock release operation.
546  */
547 void
548 lockdebug_unlocked(const char *func, size_t line,
549     volatile void *lock, uintptr_t where, int shared)
550 {
551 	struct lwp *l = curlwp;
552 	lockdebug_t *ld;
553 	int s;
554 
555 	if (__predict_false(panicstr != NULL || ld_panic))
556 		return;
557 
558 	s = splhigh();
559 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
560 		splx(s);
561 		return;
562 	}
563 	if (shared) {
564 		if (__predict_false(l->l_shlocks == 0)) {
565 			lockdebug_abort1(func, line, ld, s,
566 			    "no shared locks held by LWP", true);
567 			return;
568 		}
569 		if (__predict_false(ld->ld_shares == 0)) {
570 			lockdebug_abort1(func, line, ld, s,
571 			    "no shared holds on this lock", true);
572 			return;
573 		}
574 		l->l_shlocks--;
575 		ld->ld_shares--;
576 		if (ld->ld_lwp == l) {
577 			ld->ld_unlocked = where;
578 			ld->ld_lwp = NULL;
579 		}
580 		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
581 			ld->ld_cpu = (uint16_t)-1;
582 	} else {
583 		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
584 			lockdebug_abort1(func, line, ld, s, "not locked", true);
585 			return;
586 		}
587 
588 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
589 			if (__predict_false(ld->ld_lwp != curlwp)) {
590 				lockdebug_abort1(func, line, ld, s,
591 				    "not held by current LWP", true);
592 				return;
593 			}
594 			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
595 		} else {
596 			uint16_t idx = (uint16_t)cpu_index(curcpu());
597 			if (__predict_false(ld->ld_cpu != idx)) {
598 				lockdebug_abort1(func, line, ld, s,
599 				    "not held by current CPU", true);
600 				return;
601 			}
602 			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
603 			    ld_chain);
604 		}
605 		ld->ld_flags &= ~LD_LOCKED;
606 		ld->ld_unlocked = where;
607 		ld->ld_lwp = NULL;
608 	}
609 	__cpu_simple_unlock(&ld->ld_spinlock);
610 	splx(s);
611 }
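
/*
 * Illustrative sketch (not part of this file): the matching release for
 * the exclusive acquire sketched above.  A shared release would pass a
 * non-zero "shared" argument and decrement ld_shares / l_shlocks instead.
 */
#if 0
static void
example_exclusive_release(volatile void *lock)
{
	/* ... the primitive actually releases the lock here ... */
	lockdebug_unlocked(__func__, __LINE__, lock,
	    (uintptr_t)__builtin_return_address(0), 0);
}
#endif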
612 
613 /*
614  * lockdebug_barrier:
615  *
616  *	Panic if we hold any spin or sleep lock other than the one specified,
617  *	unless the caller explicitly permits held sleep locks (slplocks).
618  */
619 void
620 lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
621     int slplocks)
622 {
623 	struct lwp *l = curlwp;
624 	lockdebug_t *ld;
625 	int s;
626 
627 	if (__predict_false(panicstr != NULL || ld_panic))
628 		return;
629 
630 	s = splhigh();
631 	if ((l->l_pflag & LP_INTR) == 0) {
632 		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
633 			if (ld->ld_lock == onelock) {
634 				continue;
635 			}
636 			__cpu_simple_lock(&ld->ld_spinlock);
637 			lockdebug_abort1(func, line, ld, s,
638 			    "spin lock held", true);
639 			return;
640 		}
641 	}
642 	if (slplocks) {
643 		splx(s);
644 		return;
645 	}
646 	ld = TAILQ_FIRST(&l->l_ld_locks);
647 	if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
648 		__cpu_simple_lock(&ld->ld_spinlock);
649 		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
650 		return;
651 	}
652 	splx(s);
653 	if (l->l_shlocks != 0) {
654 		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
655 			if (ld->ld_lock == onelock) {
656 				continue;
657 			}
658 			if (ld->ld_lwp == l)
659 				lockdebug_dump(l, ld, printf);
660 		}
661 		panic("%s,%zu: holding %d shared locks", func, line,
662 		    l->l_shlocks);
663 	}
664 }
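
/*
 * Illustrative sketch (not part of this file): typical assertions made
 * with lockdebug_barrier().  Passing NULL as "onelock" means no lock may
 * be held at all; a non-zero "slplocks" tolerates held sleep locks.
 */
#if 0
static void
example_barrier_usage(volatile void *permitted_lock)
{
	/* Assert that nothing is held, not even sleep locks. */
	lockdebug_barrier(__func__, __LINE__, NULL, 0);

	/* Assert that at most "permitted_lock" is held. */
	lockdebug_barrier(__func__, __LINE__, permitted_lock, 0);
}
#endif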
665 
666 /*
667  * lockdebug_mem_check:
668  *
669  *	Check for in-use locks within a memory region that is
670  *	being freed.
671  */
672 void
673 lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
674 {
675 	lockdebug_t *ld;
676 	struct cpu_info *ci;
677 	int s;
678 
679 	if (__predict_false(panicstr != NULL || ld_panic))
680 		return;
681 
682 	kcov_silence_enter();
683 
684 	s = splhigh();
685 	ci = curcpu();
686 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
687 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
688 	if (ld != NULL) {
689 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
690 
691 		if (__predict_false((uintptr_t)base > lock))
692 			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
693 			    func, line, ld, base, sz);
694 		if (lock >= (uintptr_t)base + sz)
695 			ld = NULL;
696 	}
697 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
698 	if (__predict_false(ld != NULL)) {
699 		__cpu_simple_lock(&ld->ld_spinlock);
700 		lockdebug_abort1(func, line, ld, s,
701 		    "allocation contains active lock", !cold);
702 		kcov_silence_leave();
703 		return;
704 	}
705 	splx(s);
706 
707 	kcov_silence_leave();
708 }
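
/*
 * Illustrative sketch (not part of this file): an allocator freeing a
 * memory region might check that no initialized lock still lives inside
 * it.  The pointer and size are hypothetical.
 */
#if 0
static void
example_mem_check_usage(void *base, size_t sz)
{
	/* Aborts if [base, base + sz) still contains an active lock. */
	lockdebug_mem_check(__func__, __LINE__, base, sz);
}
#endif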
709 #endif /* _KERNEL */
710 
711 #ifdef DDB
712 #include <machine/db_machdep.h>
713 #include <ddb/db_interface.h>
714 #include <ddb/db_access.h>
715 #endif
716 
717 /*
718  * lockdebug_dump:
719  *
720  *	Dump information about a lock on panic, or for DDB.
721  */
722 static void
723 lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
724     __printflike(1, 2))
725 {
726 	int sleeper = (ld->ld_flags & LD_SLEEPER);
727 	lockops_t *lo = ld->ld_lockops;
728 
729 	(*pr)(
730 	    "lock address : %#018lx type     : %18s\n"
731 	    "initialized  : %#018lx",
732 	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
733 	    (long)ld->ld_initaddr);
734 
735 #ifndef _KERNEL
736 	lockops_t los;
737 	lo = &los;
738 	db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
739 #endif
740 	(*pr)("\n"
741 	    "shared holds : %18u exclusive: %18u\n"
742 	    "shares wanted: %18u exclusive: %18u\n"
743 	    "relevant cpu : %18u last held: %18u\n"
744 	    "relevant lwp : %#018lx last held: %#018lx\n"
745 	    "last locked%c : %#018lx unlocked%c: %#018lx\n",
746 	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
747 	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
748 	    (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
749 	    (long)l, (long)ld->ld_lwp,
750 	    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
751 	    (long)ld->ld_locked,
752 	    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
753 	    (long)ld->ld_unlocked);
754 
755 #ifdef _KERNEL
756 	if (lo->lo_dump != NULL)
757 		(*lo->lo_dump)(ld->ld_lock, pr);
758 
759 	if (sleeper) {
760 		turnstile_print(ld->ld_lock, pr);
761 	}
762 #endif
763 }
764 
765 #ifdef _KERNEL
766 /*
767  * lockdebug_abort1:
768  *
769  *	An error has been trapped - dump lock info and panic.
770  */
771 static void
772 lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
773 		 const char *msg, bool dopanic)
774 {
775 
776 	/*
777 	 * Don't make the situation worse if the system is already going
778 	 * down in flames.  Once a panic is triggered, lockdebug state
779 	 * becomes stale and cannot be trusted.
780 	 */
781 	if (atomic_inc_uint_nv(&ld_panic) != 1) {
782 		__cpu_simple_unlock(&ld->ld_spinlock);
783 		splx(s);
784 		return;
785 	}
786 
787 	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
788 	    func, line, msg);
789 	lockdebug_dump(curlwp, ld, printf_nolog);
790 	__cpu_simple_unlock(&ld->ld_spinlock);
791 	splx(s);
792 	printf_nolog("\n");
793 	if (dopanic)
794 		panic("LOCKDEBUG: %s error: %s,%zu: %s",
795 		    ld->ld_lockops->lo_name, func, line, msg);
796 }
797 
798 #endif /* _KERNEL */
799 #endif	/* LOCKDEBUG */
800 
801 /*
802  * lockdebug_lock_print:
803  *
804  *	Handle the DDB 'show lock' command.
805  */
806 #ifdef DDB
807 void
808 lockdebug_lock_print(void *addr,
809     void (*pr)(const char *, ...) __printflike(1, 2))
810 {
811 #ifdef LOCKDEBUG
812 	lockdebug_t *ld, lds;
813 
814 	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
815 		db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
816 		ld = &lds;
817 		if (ld->ld_lock == NULL)
818 			continue;
819 		if (addr == NULL || ld->ld_lock == addr) {
820 			lockdebug_dump(curlwp, ld, pr);
821 			if (addr != NULL)
822 				return;
823 		}
824 	}
825 	if (addr != NULL) {
826 		(*pr)("Sorry, no record of a lock with address %p found.\n",
827 		    addr);
828 	}
829 #else
830 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
831 #endif	/* LOCKDEBUG */
832 }
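
/*
 * Usage note (illustrative): on a kernel built with options LOCKDEBUG,
 * the routine above is reached from DDB via "show lock <address>", and
 * the routines below via "show all locks" (with the "/t" modifier to add
 * stack traces).
 */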
833 
834 #ifdef _KERNEL
835 #ifdef LOCKDEBUG
836 static void
837 lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
838     void (*pr)(const char *, ...) __printflike(1, 2))
839 {
840 	const char *sym;
841 
842 #ifdef _KERNEL
843 	ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
844 	    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
845 #endif
846 	(*pr)("* Lock %d (initialized at %s)\n", i++, sym);
847 	lockdebug_dump(l, ld, pr);
848 }
849 
850 static void
851 lockdebug_show_trace(const void *ptr,
852     void (*pr)(const char *, ...) __printflike(1, 2))
853 {
854 	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
855 }
856 
857 static void
858 lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
859     bool show_trace)
860 {
861 	struct proc *p;
862 
863 	LIST_FOREACH(p, &allproc, p_list) {
864 		struct lwp *l;
865 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
866 			lockdebug_t *ld;
867 			int i = 0;
868 			if (TAILQ_EMPTY(&l->l_ld_locks) &&
869 			    l->l_ld_wanted == NULL) {
870 				continue;
871 			}
872 			(*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
873 			    p->p_pid, l->l_lid,
874 			    l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
875 			if (!TAILQ_EMPTY(&l->l_ld_locks)) {
876 				(*pr)("\n*** Locks held: \n");
877 				TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
878 					(*pr)("\n");
879 					lockdebug_show_one(l, ld, i++, pr);
880 				}
881 			} else {
882 				(*pr)("\n*** Locks held: none\n");
883 			}
884 
885 			if (l->l_ld_wanted != NULL) {
886 				(*pr)("\n*** Locks wanted: \n\n");
887 				lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
888 			} else {
889 				(*pr)("\n*** Locks wanted: none\n");
890 			}
891 			if (show_trace) {
892 				(*pr)("\n*** Traceback: \n\n");
893 				lockdebug_show_trace(l, pr);
894 				(*pr)("\n");
895 			}
896 		}
897 	}
898 }
899 
900 static void
901 lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
902     bool show_trace)
903 {
904 	lockdebug_t *ld;
905 	CPU_INFO_ITERATOR cii;
906 	struct cpu_info *ci;
907 
908 	for (CPU_INFO_FOREACH(cii, ci)) {
909 		int i = 0;
910 		if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
911 			continue;
912 		(*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
913 		TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
914 			(*pr)("\n");
915 #ifdef MULTIPROCESSOR
916 			lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
917 			if (show_trace)
918 				lockdebug_show_trace(ci->ci_curlwp, pr);
919 #else
920 			lockdebug_show_one(curlwp, ld, i++, pr);
921 			if (show_trace)
922 				lockdebug_show_trace(curlwp, pr);
923 #endif
924 		}
925 	}
926 }
927 #endif /* LOCKDEBUG */
928 #endif	/* _KERNEL */
929 
930 #ifdef _KERNEL
931 void
932 lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
933     const char *modif)
934 {
935 #ifdef LOCKDEBUG
936 	bool show_trace = false;
937 	if (modif[0] == 't')
938 		show_trace = true;
939 
940 	(*pr)("[Locks tracked through LWPs]\n");
941 	lockdebug_show_all_locks_lwp(pr, show_trace);
942 	(*pr)("\n");
943 
944 	(*pr)("[Locks tracked through CPUs]\n");
945 	lockdebug_show_all_locks_cpu(pr, show_trace);
946 	(*pr)("\n");
947 #else
948 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
949 #endif	/* LOCKDEBUG */
950 }
951 
952 void
953 lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
954 {
955 #ifdef LOCKDEBUG
956 	lockdebug_t *ld;
957 	void *_ld;
958 	uint32_t n_null = 0;
959 	uint32_t n_spin_mutex = 0;
960 	uint32_t n_adaptive_mutex = 0;
961 	uint32_t n_rwlock = 0;
962 	uint32_t n_others = 0;
963 
964 	RB_TREE_FOREACH(_ld, &ld_rb_tree) {
965 		ld = _ld;
966 		if (ld->ld_lock == NULL) {
967 			n_null++;
968 			continue;
969 		}
970 		if (ld->ld_lockops->lo_name[0] == 'M') {
971 			if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
972 				n_adaptive_mutex++;
973 			else
974 				n_spin_mutex++;
975 			continue;
976 		}
977 		if (ld->ld_lockops->lo_name[0] == 'R') {
978 			n_rwlock++;
979 			continue;
980 		}
981 		n_others++;
982 	}
983 	(*pr)(
984 	    "spin mutex: %u\n"
985 	    "adaptive mutex: %u\n"
986 	    "rwlock: %u\n"
987 	    "null locks: %u\n"
988 	    "others: %u\n",
989 	    n_spin_mutex, n_adaptive_mutex, n_rwlock,
990 	    n_null, n_others);
991 #else
992 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
993 #endif	/* LOCKDEBUG */
994 }
995 #endif /* _KERNEL */
996 #endif	/* DDB */
997 
998 #ifdef _KERNEL
999 /*
1000  * lockdebug_dismiss:
1001  *      The system is rebooting, potentially from an unsafe place,
1002  *      so avoid any future aborts.
1003  *      place so avoid any future aborts.
1004  */
1005 void
1006 lockdebug_dismiss(void)
1007 {
1008 
1009 	atomic_inc_uint_nv(&ld_panic);
1010 }
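
/*
 * Illustrative sketch (not part of this file): a hypothetical shutdown
 * path silencing lockdebug before it starts tearing the system down.
 */
#if 0
static void
example_shutdown_path(void)
{
	lockdebug_dismiss();
	/* ... proceed with the reboot ... */
}
#endif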
1011 
1012 /*
1013  * lockdebug_abort:
1014  *
1015  *	An error has been trapped - dump lock info and call panic().
1016  */
1017 void
1018 lockdebug_abort(const char *func, size_t line, const volatile void *lock,
1019     lockops_t *ops, const char *msg)
1020 {
1021 #ifdef LOCKDEBUG
1022 	lockdebug_t *ld;
1023 	int s;
1024 
1025 	s = splhigh();
1026 	if ((ld = lockdebug_lookup(func, line, lock,
1027 			(uintptr_t) __builtin_return_address(0))) != NULL) {
1028 		lockdebug_abort1(func, line, ld, s, msg, true);
1029 		return;
1030 	}
1031 	splx(s);
1032 #endif	/* LOCKDEBUG */
1033 
1034 	/*
1035 	 * Don't make the situation worse if the system is already going
1036 	 * down in flames.  Once a panic is triggered, lockdebug state
1037 	 * becomes stale and cannot be trusted.
1038 	 */
1039 	if (atomic_inc_uint_nv(&ld_panic) > 1)
1040 		return;
1041 
1042 	printf_nolog("%s error: %s,%zu: %s\n\n"
1043 	    "lock address : %#018lx\n"
1044 	    "current cpu  : %18d\n"
1045 	    "current lwp  : %#018lx\n",
1046 	    ops->lo_name, func, line, msg, (long)lock,
1047 	    (int)cpu_index(curcpu()), (long)curlwp);
1048 	(*ops->lo_dump)(lock, printf_nolog);
1049 	printf_nolog("\n");
1050 
1051 	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
1052 	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
1053 }
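
/*
 * Illustrative sketch (not part of this file): how a lock primitive might
 * report a consistency failure.  The lockops_t and the message are
 * hypothetical; lockdebug_abort() dumps what it knows and panics.
 */
#if 0
static void
example_report_error(const volatile void *lock, lockops_t *ops)
{
	lockdebug_abort(__func__, __LINE__, lock, ops, "example error");
}
#endif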
1054 #endif /* _KERNEL */
1055