xref: /netbsd-src/sys/kern/subr_lockdebug.c (revision e8f73aaeb4fee813cc2faac9f1e5ef364d41657e)
1*e8f73aaeSnakayama /*	$NetBSD: subr_lockdebug.c,v 1.83 2022/09/02 06:01:38 nakayama Exp $	*/
2b07ec3fcSad 
3b07ec3fcSad /*-
441a8f863Sad  * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
5b07ec3fcSad  * All rights reserved.
6b07ec3fcSad  *
7b07ec3fcSad  * This code is derived from software contributed to The NetBSD Foundation
8b07ec3fcSad  * by Andrew Doran.
9b07ec3fcSad  *
10b07ec3fcSad  * Redistribution and use in source and binary forms, with or without
11b07ec3fcSad  * modification, are permitted provided that the following conditions
12b07ec3fcSad  * are met:
13b07ec3fcSad  * 1. Redistributions of source code must retain the above copyright
14b07ec3fcSad  *    notice, this list of conditions and the following disclaimer.
15b07ec3fcSad  * 2. Redistributions in binary form must reproduce the above copyright
16b07ec3fcSad  *    notice, this list of conditions and the following disclaimer in the
17b07ec3fcSad  *    documentation and/or other materials provided with the distribution.
18b07ec3fcSad  *
19b07ec3fcSad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20b07ec3fcSad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21b07ec3fcSad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22b07ec3fcSad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23b07ec3fcSad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24b07ec3fcSad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25b07ec3fcSad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26b07ec3fcSad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27b07ec3fcSad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28b07ec3fcSad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29b07ec3fcSad  * POSSIBILITY OF SUCH DAMAGE.
30b07ec3fcSad  */
31b07ec3fcSad 
32b07ec3fcSad /*
33dde5d75eSad  * Basic lock debugging code shared among lock primitives.
34b07ec3fcSad  */
35b07ec3fcSad 
360ca3d21bSdsl #include <sys/cdefs.h>
37*e8f73aaeSnakayama __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.83 2022/09/02 06:01:38 nakayama Exp $");
380ca3d21bSdsl 
3905cf8927Sozaki-r #ifdef _KERNEL_OPT
40b07ec3fcSad #include "opt_ddb.h"
4105cf8927Sozaki-r #endif
42b07ec3fcSad 
43b07ec3fcSad #include <sys/param.h>
44b07ec3fcSad #include <sys/proc.h>
45b07ec3fcSad #include <sys/systm.h>
4611dc6399Sad #include <sys/kernel.h>
47b07ec3fcSad #include <sys/kmem.h>
48b07ec3fcSad #include <sys/lockdebug.h>
49b07ec3fcSad #include <sys/sleepq.h>
5011dc6399Sad #include <sys/cpu.h>
51b470ab62Sad #include <sys/atomic.h>
52212c50ddSad #include <sys/lock.h>
5319e6c76bSmatt #include <sys/rbtree.h>
54e611636fSozaki-r #include <sys/ksyms.h>
5569ffbd32Smaxv #include <sys/kcov.h>
5638d5e341Syamt 
570664a045Sad #include <machine/lock.h>
580664a045Sad 
591bfc2d5fSmsaitoh #ifdef DDB
601bfc2d5fSmsaitoh #include <machine/db_machdep.h>
611bfc2d5fSmsaitoh #include <ddb/db_interface.h>
621bfc2d5fSmsaitoh #include <ddb/db_access.h>
631bfc2d5fSmsaitoh #include <ddb/db_sym.h>
641bfc2d5fSmsaitoh #endif
651bfc2d5fSmsaitoh 
66057adba1Sad unsigned int		ld_panic;
67057adba1Sad 
68b07ec3fcSad #ifdef LOCKDEBUG
69b07ec3fcSad 
70c717d9c1Sscole #ifdef __ia64__
71c717d9c1Sscole #define	LD_BATCH_SHIFT	16
72c717d9c1Sscole #else
73b07ec3fcSad #define	LD_BATCH_SHIFT	9
74c717d9c1Sscole #endif
75b07ec3fcSad #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
76b07ec3fcSad #define	LD_BATCH_MASK	(LD_BATCH - 1)
77b07ec3fcSad #define	LD_MAX_LOCKS	1048576
78b07ec3fcSad #define	LD_SLOP		16
79b07ec3fcSad 
80b07ec3fcSad #define	LD_LOCKED	0x01
81b07ec3fcSad #define	LD_SLEEPER	0x02
82b07ec3fcSad 
83461cd942Sad #define	LD_WRITE_LOCK	0x80000000
84461cd942Sad 
85b07ec3fcSad typedef struct lockdebug {
86879d5dfbSrmind 	struct rb_node	ld_rb_node;
87a4e0004bSad 	__cpu_simple_lock_t ld_spinlock;
88b07ec3fcSad 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
89b07ec3fcSad 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
90b07ec3fcSad 	volatile void	*ld_lock;
91b07ec3fcSad 	lockops_t	*ld_lockops;
92b07ec3fcSad 	struct lwp	*ld_lwp;
93b07ec3fcSad 	uintptr_t	ld_locked;
94b07ec3fcSad 	uintptr_t	ld_unlocked;
9511dc6399Sad 	uintptr_t	ld_initaddr;
96b07ec3fcSad 	uint16_t	ld_shares;
97b07ec3fcSad 	uint16_t	ld_cpu;
98b07ec3fcSad 	uint8_t		ld_flags;
99b07ec3fcSad 	uint8_t		ld_shwant;	/* advisory */
100b07ec3fcSad 	uint8_t		ld_exwant;	/* advisory */
101b07ec3fcSad 	uint8_t		ld_unused;
102b07ec3fcSad } volatile lockdebug_t;
103b07ec3fcSad 
104b07ec3fcSad typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
105b07ec3fcSad 
106a4e0004bSad __cpu_simple_lock_t	ld_mod_lk;
10711910619Smatt lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
108ffdb4a83Schristos #ifdef _KERNEL
10911910619Smatt lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
110ffdb4a83Schristos #else
111ffdb4a83Schristos extern lockdebuglist_t	ld_all;
112ffdb4a83Schristos #define cpu_name(a)	"?"
113ffdb4a83Schristos #define cpu_index(a)	-1
114ffdb4a83Schristos #define curlwp		NULL
115ffdb4a83Schristos #endif /* _KERNEL */
116b07ec3fcSad int			ld_nfree;
117b07ec3fcSad int			ld_freeptr;
118b07ec3fcSad int			ld_recurse;
11964e54fbbSad bool			ld_nomore;
120b07ec3fcSad lockdebug_t		ld_prime[LD_BATCH];
121b07ec3fcSad 
122ffdb4a83Schristos #ifdef _KERNEL
1239be065fbSchristos static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
124a4e0004bSad     const char *, bool);
125a4e0004bSad static int	lockdebug_more(int);
12664e54fbbSad static void	lockdebug_init(void);
127a4da18dbSad static void	lockdebug_dump(lwp_t *, lockdebug_t *,
128a4da18dbSad     void (*)(const char *, ...)
129a27531bcSchristos     __printflike(1, 2));
130b07ec3fcSad 
13138d5e341Syamt static signed int
ld_rbto_compare_nodes(void * ctx,const void * n1,const void * n2)132879d5dfbSrmind ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
13338d5e341Syamt {
134879d5dfbSrmind 	const lockdebug_t *ld1 = n1;
135879d5dfbSrmind 	const lockdebug_t *ld2 = n2;
13633e66db2Syamt 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
13733e66db2Syamt 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
13833e66db2Syamt 
13933e66db2Syamt 	if (a < b)
14033e66db2Syamt 		return -1;
141879d5dfbSrmind 	if (a > b)
142879d5dfbSrmind 		return 1;
14338d5e341Syamt 	return 0;
14438d5e341Syamt }
14538d5e341Syamt 
14638d5e341Syamt static signed int
ld_rbto_compare_key(void * ctx,const void * n,const void * key)147879d5dfbSrmind ld_rbto_compare_key(void *ctx, const void *n, const void *key)
14838d5e341Syamt {
149879d5dfbSrmind 	const lockdebug_t *ld = n;
15033e66db2Syamt 	const uintptr_t a = (uintptr_t)ld->ld_lock;
15133e66db2Syamt 	const uintptr_t b = (uintptr_t)key;
15233e66db2Syamt 
15333e66db2Syamt 	if (a < b)
15433e66db2Syamt 		return -1;
155879d5dfbSrmind 	if (a > b)
156879d5dfbSrmind 		return 1;
15738d5e341Syamt 	return 0;
15838d5e341Syamt }
15938d5e341Syamt 
160879d5dfbSrmind static rb_tree_t ld_rb_tree;
16138d5e341Syamt 
162879d5dfbSrmind static const rb_tree_ops_t ld_rb_tree_ops = {
1635a4f0c6bSmatt 	.rbto_compare_nodes = ld_rbto_compare_nodes,
1645a4f0c6bSmatt 	.rbto_compare_key = ld_rbto_compare_key,
165879d5dfbSrmind 	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
166879d5dfbSrmind 	.rbto_context = NULL
16738d5e341Syamt };
16838d5e341Syamt 
169671754ccSyamt static inline lockdebug_t *
lockdebug_lookup1(const volatile void * lock)170e7f0067cSchristos lockdebug_lookup1(const volatile void *lock)
171671754ccSyamt {
172671754ccSyamt 	lockdebug_t *ld;
173a4e0004bSad 	struct cpu_info *ci;
174671754ccSyamt 
175a4e0004bSad 	ci = curcpu();
176a4e0004bSad 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
177e7f0067cSchristos 	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
178a4e0004bSad 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
179a4e0004bSad 	if (ld == NULL) {
180671754ccSyamt 		return NULL;
181a4e0004bSad 	}
182a4e0004bSad 	__cpu_simple_lock(&ld->ld_spinlock);
183671754ccSyamt 
184671754ccSyamt 	return ld;
185671754ccSyamt }
186671754ccSyamt 
187a4e0004bSad static void
lockdebug_lock_cpus(void)188a4e0004bSad lockdebug_lock_cpus(void)
189a4e0004bSad {
190a4e0004bSad 	CPU_INFO_ITERATOR cii;
191a4e0004bSad 	struct cpu_info *ci;
192a4e0004bSad 
193a4e0004bSad 	for (CPU_INFO_FOREACH(cii, ci)) {
194a4e0004bSad 		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
195a4e0004bSad 	}
196a4e0004bSad }
197a4e0004bSad 
198a4e0004bSad static void
lockdebug_unlock_cpus(void)199a4e0004bSad lockdebug_unlock_cpus(void)
200a4e0004bSad {
201a4e0004bSad 	CPU_INFO_ITERATOR cii;
202a4e0004bSad 	struct cpu_info *ci;
203a4e0004bSad 
204a4e0004bSad 	for (CPU_INFO_FOREACH(cii, ci)) {
205a4e0004bSad 		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
206a4e0004bSad 	}
207a4e0004bSad }
208a4e0004bSad 
209b07ec3fcSad /*
210b07ec3fcSad  * lockdebug_lookup:
211b07ec3fcSad  *
21238d5e341Syamt  *	Find a lockdebug structure by a pointer to a lock and return it locked.
213b07ec3fcSad  */
214b07ec3fcSad static inline lockdebug_t *
lockdebug_lookup(const char * func,size_t line,const volatile void * lock,uintptr_t where)215e7f0067cSchristos lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
2169be065fbSchristos     uintptr_t where)
217b07ec3fcSad {
21838d5e341Syamt 	lockdebug_t *ld;
219b07ec3fcSad 
22069ffbd32Smaxv 	kcov_silence_enter();
221a4e0004bSad 	ld = lockdebug_lookup1(lock);
22269ffbd32Smaxv 	kcov_silence_leave();
22369ffbd32Smaxv 
224d54aad2dSozaki-r 	if (__predict_false(ld == NULL)) {
2259be065fbSchristos 		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
2269be065fbSchristos 		    PRIxPTR ")", func, line, lock, where);
227879d5dfbSrmind 	}
228b07ec3fcSad 	return ld;
229b07ec3fcSad }
230b07ec3fcSad 
231b07ec3fcSad /*
232b07ec3fcSad  * lockdebug_init:
233b07ec3fcSad  *
234b07ec3fcSad  *	Initialize the lockdebug system.  Allocate an initial pool of
235b07ec3fcSad  *	lockdebug structures before the VM system is up and running.
236b07ec3fcSad  */
23764e54fbbSad static void
lockdebug_init(void)238b07ec3fcSad lockdebug_init(void)
239b07ec3fcSad {
240b07ec3fcSad 	lockdebug_t *ld;
241b07ec3fcSad 	int i;
242b07ec3fcSad 
243a4e0004bSad 	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
244a4e0004bSad 	TAILQ_INIT(&curlwp->l_ld_locks);
245a4e0004bSad 	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
246a4e0004bSad 	__cpu_simple_lock_init(&ld_mod_lk);
2472cab8950Smatt 
24838d5e341Syamt 	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
24938d5e341Syamt 
250b07ec3fcSad 	ld = ld_prime;
251b07ec3fcSad 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
252a4e0004bSad 		__cpu_simple_lock_init(&ld->ld_spinlock);
253b07ec3fcSad 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
254b07ec3fcSad 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
255b07ec3fcSad 	}
256b07ec3fcSad 	ld_freeptr = 1;
257b07ec3fcSad 	ld_nfree = LD_BATCH - 1;
258b07ec3fcSad }
259b07ec3fcSad 
260b07ec3fcSad /*
261b07ec3fcSad  * lockdebug_alloc:
262b07ec3fcSad  *
263b07ec3fcSad  *	A lock is being initialized, so allocate an associated debug
264b07ec3fcSad  *	structure.
265b07ec3fcSad  */
26638d5e341Syamt bool
lockdebug_alloc(const char * func,size_t line,volatile void * lock,lockops_t * lo,uintptr_t initaddr)2679be065fbSchristos lockdebug_alloc(const char *func, size_t line, volatile void *lock,
2689be065fbSchristos     lockops_t *lo, uintptr_t initaddr)
269b07ec3fcSad {
270b07ec3fcSad 	struct cpu_info *ci;
271b07ec3fcSad 	lockdebug_t *ld;
272a4e0004bSad 	int s;
273b07ec3fcSad 
274d54aad2dSozaki-r 	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
27538d5e341Syamt 		return false;
276d54aad2dSozaki-r 	if (__predict_false(ld_freeptr == 0))
27764e54fbbSad 		lockdebug_init();
278b07ec3fcSad 
279a4e0004bSad 	s = splhigh();
280a4e0004bSad 	__cpu_simple_lock(&ld_mod_lk);
281d54aad2dSozaki-r 	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
282a4e0004bSad 		__cpu_simple_unlock(&ld_mod_lk);
2839be065fbSchristos 		lockdebug_abort1(func, line, ld, s, "already initialized",
2849be065fbSchristos 		    true);
2857eb6056fSad 		return false;
286671754ccSyamt 	}
287671754ccSyamt 
288b07ec3fcSad 	/*
289b07ec3fcSad 	 * Pinch a new debug structure.  We may recurse because we call
290b07ec3fcSad 	 * kmem_alloc(), which may need to initialize new locks somewhere
2915492d866Sskrll 	 * down the path.  If not recursing, we try to maintain at least
292b07ec3fcSad 	 * LD_SLOP structures free, which should hopefully be enough to
293b07ec3fcSad 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
294b07ec3fcSad 	 * worry: we'll just mark the lock as not having an ID.
295b07ec3fcSad 	 */
296461cd942Sad 	ci = curcpu();
297b07ec3fcSad 	ci->ci_lkdebug_recurse++;
298b07ec3fcSad 	if (TAILQ_EMPTY(&ld_free)) {
29964e54fbbSad 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
300b07ec3fcSad 			ci->ci_lkdebug_recurse--;
301a4e0004bSad 			__cpu_simple_unlock(&ld_mod_lk);
302a4e0004bSad 			splx(s);
30338d5e341Syamt 			return false;
304b07ec3fcSad 		}
305a4e0004bSad 		s = lockdebug_more(s);
306a4e0004bSad 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
307a4e0004bSad 		s = lockdebug_more(s);
308a4e0004bSad 	}
309d54aad2dSozaki-r 	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
310a4e0004bSad 		__cpu_simple_unlock(&ld_mod_lk);
311a4e0004bSad 		splx(s);
31238d5e341Syamt 		return false;
313b07ec3fcSad 	}
314b07ec3fcSad 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
315b07ec3fcSad 	ld_nfree--;
316b07ec3fcSad 	ci->ci_lkdebug_recurse--;
317b07ec3fcSad 
318d54aad2dSozaki-r 	if (__predict_false(ld->ld_lock != NULL)) {
3199be065fbSchristos 		panic("%s,%zu: corrupt table ld %p", func, line, ld);
320a4e0004bSad 	}
321b07ec3fcSad 
322b07ec3fcSad 	/* Initialise the structure. */
323b07ec3fcSad 	ld->ld_lock = lock;
324b07ec3fcSad 	ld->ld_lockops = lo;
325b07ec3fcSad 	ld->ld_locked = 0;
326b07ec3fcSad 	ld->ld_unlocked = 0;
327b07ec3fcSad 	ld->ld_lwp = NULL;
32811dc6399Sad 	ld->ld_initaddr = initaddr;
3297b8f5124Sad 	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
330a4e0004bSad 	lockdebug_lock_cpus();
331879d5dfbSrmind 	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
332a4e0004bSad 	lockdebug_unlock_cpus();
333a4e0004bSad 	__cpu_simple_unlock(&ld_mod_lk);
33438d5e341Syamt 
335a4e0004bSad 	splx(s);
33638d5e341Syamt 	return true;
337b07ec3fcSad }
338b07ec3fcSad 
339b07ec3fcSad /*
340b07ec3fcSad  * lockdebug_free:
341b07ec3fcSad  *
342b07ec3fcSad  *	A lock is being destroyed, so release debugging resources.
343b07ec3fcSad  */
344b07ec3fcSad void
lockdebug_free(const char * func,size_t line,volatile void * lock)3459be065fbSchristos lockdebug_free(const char *func, size_t line, volatile void *lock)
346b07ec3fcSad {
347b07ec3fcSad 	lockdebug_t *ld;
348a4e0004bSad 	int s;
349b07ec3fcSad 
350d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
351b07ec3fcSad 		return;
352b07ec3fcSad 
353a4e0004bSad 	s = splhigh();
354a4e0004bSad 	__cpu_simple_lock(&ld_mod_lk);
3559be065fbSchristos 	ld = lockdebug_lookup(func, line, lock,
3569be065fbSchristos 	    (uintptr_t) __builtin_return_address(0));
357d54aad2dSozaki-r 	if (__predict_false(ld == NULL)) {
358a4e0004bSad 		__cpu_simple_unlock(&ld_mod_lk);
3599be065fbSchristos 		panic("%s,%zu: destroying uninitialized object %p"
3609be065fbSchristos 		    "(ld_lock=%p)", func, line, lock, ld->ld_lock);
3617eb6056fSad 		return;
362b07ec3fcSad 	}
363d54aad2dSozaki-r 	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
364d54aad2dSozaki-r 	    ld->ld_shares != 0)) {
365a4e0004bSad 		__cpu_simple_unlock(&ld_mod_lk);
3669be065fbSchristos 		lockdebug_abort1(func, line, ld, s, "is locked or in use",
3679be065fbSchristos 		    true);
3687eb6056fSad 		return;
3697eb6056fSad 	}
370a4e0004bSad 	lockdebug_lock_cpus();
371879d5dfbSrmind 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
372a4e0004bSad 	lockdebug_unlock_cpus();
373b07ec3fcSad 	ld->ld_lock = NULL;
374b07ec3fcSad 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
375b07ec3fcSad 	ld_nfree++;
376a4e0004bSad 	__cpu_simple_unlock(&ld->ld_spinlock);
377a4e0004bSad 	__cpu_simple_unlock(&ld_mod_lk);
378a4e0004bSad 	splx(s);
379b07ec3fcSad }
380b07ec3fcSad 
381b07ec3fcSad /*
382b07ec3fcSad  * lockdebug_more:
383b07ec3fcSad  *
384b07ec3fcSad  *	Allocate a batch of debug structures and add to the free list.
385a4e0004bSad  *	Must be called with ld_mod_lk held.
386b07ec3fcSad  */
387a4e0004bSad static int
lockdebug_more(int s)388a4e0004bSad lockdebug_more(int s)
389b07ec3fcSad {
390b07ec3fcSad 	lockdebug_t *ld;
391b07ec3fcSad 	void *block;
39264e54fbbSad 	int i, base, m;
393b07ec3fcSad 
3947b8f5124Sad 	/*
3957b8f5124Sad 	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
3967b8f5124Sad 	 * deadlock, because we don't know which locks the caller holds.
3977b8f5124Sad 	 */
3984e425594Sozaki-r 	if (cpu_intr_p() || cpu_softintr_p()) {
3997b8f5124Sad 		return s;
4007b8f5124Sad 	}
4017b8f5124Sad 
402b07ec3fcSad 	while (ld_nfree < LD_SLOP) {
403a4e0004bSad 		__cpu_simple_unlock(&ld_mod_lk);
404a4e0004bSad 		splx(s);
405b07ec3fcSad 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
406a4e0004bSad 		s = splhigh();
407a4e0004bSad 		__cpu_simple_lock(&ld_mod_lk);
408b07ec3fcSad 
409b07ec3fcSad 		if (ld_nfree > LD_SLOP) {
410b07ec3fcSad 			/* Somebody beat us to it. */
411a4e0004bSad 			__cpu_simple_unlock(&ld_mod_lk);
412a4e0004bSad 			splx(s);
413b07ec3fcSad 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
414a4e0004bSad 			s = splhigh();
415a4e0004bSad 			__cpu_simple_lock(&ld_mod_lk);
416b07ec3fcSad 			continue;
417b07ec3fcSad 		}
418b07ec3fcSad 
419b07ec3fcSad 		base = ld_freeptr;
420b07ec3fcSad 		ld_nfree += LD_BATCH;
421b07ec3fcSad 		ld = block;
422b07ec3fcSad 		base <<= LD_BATCH_SHIFT;
423d1579b2dSriastradh 		m = uimin(LD_MAX_LOCKS, base + LD_BATCH);
424b07ec3fcSad 
42564e54fbbSad 		if (m == LD_MAX_LOCKS)
42664e54fbbSad 			ld_nomore = true;
42764e54fbbSad 
42864e54fbbSad 		for (i = base; i < m; i++, ld++) {
429a4e0004bSad 			__cpu_simple_lock_init(&ld->ld_spinlock);
430b07ec3fcSad 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
431b07ec3fcSad 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
432b07ec3fcSad 		}
433b07ec3fcSad 
434b470ab62Sad 		membar_producer();
435b07ec3fcSad 	}
436a4e0004bSad 
437a4e0004bSad 	return s;
438b07ec3fcSad }
439b07ec3fcSad 
440b07ec3fcSad /*
441b07ec3fcSad  * lockdebug_wantlock:
442b07ec3fcSad  *
44348e395b1Spgoyette  *	Process the preamble to a lock acquire.  The "shared"
44448e395b1Spgoyette  *	parameter controls which ld_{ex,sh}want counter is
44548e395b1Spgoyette  *	updated; a negative value of shared updates neither.
446b07ec3fcSad  */
447b07ec3fcSad void
lockdebug_wantlock(const char * func,size_t line,const volatile void * lock,uintptr_t where,int shared)4489be065fbSchristos lockdebug_wantlock(const char *func, size_t line,
449e7f0067cSchristos     const volatile void *lock, uintptr_t where, int shared)
450b07ec3fcSad {
451b07ec3fcSad 	struct lwp *l = curlwp;
452b07ec3fcSad 	lockdebug_t *ld;
453dd962f86Sthorpej 	bool recurse;
454a4e0004bSad 	int s;
455b07ec3fcSad 
456b07ec3fcSad 	(void)shared;
4574f3d5a9cSthorpej 	recurse = false;
458b07ec3fcSad 
459d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
460b07ec3fcSad 		return;
461b07ec3fcSad 
462a4e0004bSad 	s = splhigh();
4639be065fbSchristos 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
464a4e0004bSad 		splx(s);
465b07ec3fcSad 		return;
466a4e0004bSad 	}
467839080f7Syamt 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
468b07ec3fcSad 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
469060c06beSmlelstv 			if (ld->ld_lwp == l)
4704f3d5a9cSthorpej 				recurse = true;
471ac8f6353Srmind 		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
4724f3d5a9cSthorpej 			recurse = true;
473b07ec3fcSad 	}
47411dc6399Sad 	if (cpu_intr_p()) {
475d54aad2dSozaki-r 		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
4769be065fbSchristos 			lockdebug_abort1(func, line, ld, s,
47711dc6399Sad 			    "acquiring sleep lock from interrupt context",
47811dc6399Sad 			    true);
4797eb6056fSad 			return;
4807eb6056fSad 		}
48111dc6399Sad 	}
48248e395b1Spgoyette 	if (shared > 0)
483b07ec3fcSad 		ld->ld_shwant++;
48448e395b1Spgoyette 	else if (shared == 0)
485b07ec3fcSad 		ld->ld_exwant++;
486d54aad2dSozaki-r 	if (__predict_false(recurse)) {
4879be065fbSchristos 		lockdebug_abort1(func, line, ld, s, "locking against myself",
48811dc6399Sad 		    true);
4897eb6056fSad 		return;
4907eb6056fSad 	}
491a4da18dbSad 	if (l->l_ld_wanted == NULL) {
492a4da18dbSad 		l->l_ld_wanted = ld;
493a4da18dbSad 	}
494a4e0004bSad 	__cpu_simple_unlock(&ld->ld_spinlock);
495a4e0004bSad 	splx(s);
496b07ec3fcSad }
497b07ec3fcSad 
498b07ec3fcSad /*
499b07ec3fcSad  * lockdebug_locked:
500b07ec3fcSad  *
501b07ec3fcSad  *	Process a lock acquire operation.
502b07ec3fcSad  */
503b07ec3fcSad void
lockdebug_locked(const char * func,size_t line,volatile void * lock,void * cvlock,uintptr_t where,int shared)5049be065fbSchristos lockdebug_locked(const char *func, size_t line,
5059be065fbSchristos     volatile void *lock, void *cvlock, uintptr_t where, int shared)
506b07ec3fcSad {
507b07ec3fcSad 	struct lwp *l = curlwp;
508b07ec3fcSad 	lockdebug_t *ld;
509a4e0004bSad 	int s;
510b07ec3fcSad 
511d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
512b07ec3fcSad 		return;
513b07ec3fcSad 
514a4e0004bSad 	s = splhigh();
5159be065fbSchristos 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
516a4e0004bSad 		splx(s);
517b07ec3fcSad 		return;
518a4e0004bSad 	}
519e0bb7e8eSad 	if (shared) {
520b07ec3fcSad 		l->l_shlocks++;
5219d109b30Syamt 		ld->ld_locked = where;
522b07ec3fcSad 		ld->ld_shares++;
523b07ec3fcSad 		ld->ld_shwant--;
524b07ec3fcSad 	} else {
525d54aad2dSozaki-r 		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
5269be065fbSchristos 			lockdebug_abort1(func, line, ld, s, "already locked",
527a4e0004bSad 			    true);
5287eb6056fSad 			return;
5297eb6056fSad 		}
530b07ec3fcSad 		ld->ld_flags |= LD_LOCKED;
531b07ec3fcSad 		ld->ld_locked = where;
532b07ec3fcSad 		ld->ld_exwant--;
533b07ec3fcSad 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
534a4e0004bSad 			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
535b07ec3fcSad 		} else {
536a4e0004bSad 			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
537a4e0004bSad 			    ld, ld_chain);
538b07ec3fcSad 		}
539b07ec3fcSad 	}
540ac8f6353Srmind 	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
541839080f7Syamt 	ld->ld_lwp = l;
542a4e0004bSad 	__cpu_simple_unlock(&ld->ld_spinlock);
543a4da18dbSad 	if (l->l_ld_wanted == ld) {
544a4da18dbSad 		l->l_ld_wanted = NULL;
545a4da18dbSad 	}
546a4e0004bSad 	splx(s);
547b07ec3fcSad }
548b07ec3fcSad 
549b07ec3fcSad /*
550b07ec3fcSad  * lockdebug_unlocked:
551b07ec3fcSad  *
552b07ec3fcSad  *	Process a lock release operation.
553b07ec3fcSad  */
554b07ec3fcSad void
lockdebug_unlocked(const char * func,size_t line,volatile void * lock,uintptr_t where,int shared)5559be065fbSchristos lockdebug_unlocked(const char *func, size_t line,
5569be065fbSchristos     volatile void *lock, uintptr_t where, int shared)
557b07ec3fcSad {
558b07ec3fcSad 	struct lwp *l = curlwp;
559b07ec3fcSad 	lockdebug_t *ld;
560a4e0004bSad 	int s;
561b07ec3fcSad 
562d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
563b07ec3fcSad 		return;
564b07ec3fcSad 
565a4e0004bSad 	s = splhigh();
5669be065fbSchristos 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
567a4e0004bSad 		splx(s);
568b07ec3fcSad 		return;
569a4e0004bSad 	}
570e0bb7e8eSad 	if (shared) {
571d54aad2dSozaki-r 		if (__predict_false(l->l_shlocks == 0)) {
5729be065fbSchristos 			lockdebug_abort1(func, line, ld, s,
57311dc6399Sad 			    "no shared locks held by LWP", true);
5747eb6056fSad 			return;
5757eb6056fSad 		}
576d54aad2dSozaki-r 		if (__predict_false(ld->ld_shares == 0)) {
5779be065fbSchristos 			lockdebug_abort1(func, line, ld, s,
57811dc6399Sad 			    "no shared holds on this lock", true);
5797eb6056fSad 			return;
5807eb6056fSad 		}
581b07ec3fcSad 		l->l_shlocks--;
582b07ec3fcSad 		ld->ld_shares--;
5839d109b30Syamt 		if (ld->ld_lwp == l) {
5849d109b30Syamt 			ld->ld_unlocked = where;
585839080f7Syamt 			ld->ld_lwp = NULL;
5869d109b30Syamt 		}
587ac8f6353Srmind 		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
588839080f7Syamt 			ld->ld_cpu = (uint16_t)-1;
589b07ec3fcSad 	} else {
590d54aad2dSozaki-r 		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
5919be065fbSchristos 			lockdebug_abort1(func, line, ld, s, "not locked", true);
5927eb6056fSad 			return;
5937eb6056fSad 		}
594b07ec3fcSad 
595b07ec3fcSad 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
596d54aad2dSozaki-r 			if (__predict_false(ld->ld_lwp != curlwp)) {
5979be065fbSchristos 				lockdebug_abort1(func, line, ld, s,
59811dc6399Sad 				    "not held by current LWP", true);
5997eb6056fSad 				return;
6007eb6056fSad 			}
601a4e0004bSad 			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
602b07ec3fcSad 		} else {
603d54aad2dSozaki-r 			uint16_t idx = (uint16_t)cpu_index(curcpu());
604d54aad2dSozaki-r 			if (__predict_false(ld->ld_cpu != idx)) {
6059be065fbSchristos 				lockdebug_abort1(func, line, ld, s,
60611dc6399Sad 				    "not held by current CPU", true);
6077eb6056fSad 				return;
6087eb6056fSad 			}
609a4e0004bSad 			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
610a4e0004bSad 			    ld_chain);
611b07ec3fcSad 		}
612521a86d5Smatt 		ld->ld_flags &= ~LD_LOCKED;
613521a86d5Smatt 		ld->ld_unlocked = where;
614521a86d5Smatt 		ld->ld_lwp = NULL;
615b07ec3fcSad 	}
616a4e0004bSad 	__cpu_simple_unlock(&ld->ld_spinlock);
617a4e0004bSad 	splx(s);
618b07ec3fcSad }
619b07ec3fcSad 
620b07ec3fcSad /*
621b07ec3fcSad  * lockdebug_barrier:
622b07ec3fcSad  *
62341a8f863Sad  *	Panic if we hold more than one specified lock, and optionally, if we
62441a8f863Sad  *	hold any sleep locks.
625b07ec3fcSad  */
626b07ec3fcSad void
lockdebug_barrier(const char * func,size_t line,volatile void * onelock,int slplocks)62741a8f863Sad lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
6289be065fbSchristos     int slplocks)
629b07ec3fcSad {
630b07ec3fcSad 	struct lwp *l = curlwp;
631b07ec3fcSad 	lockdebug_t *ld;
632a4e0004bSad 	int s;
633b07ec3fcSad 
634d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
635b07ec3fcSad 		return;
636b07ec3fcSad 
637a4e0004bSad 	s = splhigh();
638a4e0004bSad 	if ((l->l_pflag & LP_INTR) == 0) {
639a4e0004bSad 		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
64041a8f863Sad 			if (ld->ld_lock == onelock) {
641b07ec3fcSad 				continue;
642b07ec3fcSad 			}
643a4e0004bSad 			__cpu_simple_lock(&ld->ld_spinlock);
6449be065fbSchristos 			lockdebug_abort1(func, line, ld, s,
645a4e0004bSad 			    "spin lock held", true);
6467eb6056fSad 			return;
6477eb6056fSad 		}
648b07ec3fcSad 	}
649a4e0004bSad 	if (slplocks) {
650a4e0004bSad 		splx(s);
6517eb6056fSad 		return;
6527eb6056fSad 	}
653d54aad2dSozaki-r 	ld = TAILQ_FIRST(&l->l_ld_locks);
65441a8f863Sad 	if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
655a4e0004bSad 		__cpu_simple_lock(&ld->ld_spinlock);
6569be065fbSchristos 		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
657a4e0004bSad 		return;
658b07ec3fcSad 	}
659a4e0004bSad 	splx(s);
660a4e0004bSad 	if (l->l_shlocks != 0) {
661a27531bcSchristos 		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
66241a8f863Sad 			if (ld->ld_lock == onelock) {
66341a8f863Sad 				continue;
66441a8f863Sad 			}
665a27531bcSchristos 			if (ld->ld_lwp == l)
666a4da18dbSad 				lockdebug_dump(l, ld, printf);
667a27531bcSchristos 		}
6689be065fbSchristos 		panic("%s,%zu: holding %d shared locks", func, line,
6699be065fbSchristos 		    l->l_shlocks);
670b07ec3fcSad 	}
671b07ec3fcSad }
672b07ec3fcSad 
673b07ec3fcSad /*
67411dc6399Sad  * lockdebug_mem_check:
67511dc6399Sad  *
67611dc6399Sad  *	Check for in-use locks within a memory region that is
67738d5e341Syamt  *	being freed.
67811dc6399Sad  */
67911dc6399Sad void
lockdebug_mem_check(const char * func,size_t line,void * base,size_t sz)6809be065fbSchristos lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
68111dc6399Sad {
68211dc6399Sad 	lockdebug_t *ld;
683a4e0004bSad 	struct cpu_info *ci;
684461cd942Sad 	int s;
68511dc6399Sad 
686d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
687ea3f10f7Sad 		return;
688ea3f10f7Sad 
68969ffbd32Smaxv 	kcov_silence_enter();
69069ffbd32Smaxv 
691a4e0004bSad 	s = splhigh();
692a4e0004bSad 	ci = curcpu();
693a4e0004bSad 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
69438d5e341Syamt 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
695461cd942Sad 	if (ld != NULL) {
696461cd942Sad 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
697461cd942Sad 
698d54aad2dSozaki-r 		if (__predict_false((uintptr_t)base > lock))
6999be065fbSchristos 			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
7009be065fbSchristos 			    func, line, ld, base, sz);
701461cd942Sad 		if (lock >= (uintptr_t)base + sz)
702461cd942Sad 			ld = NULL;
703461cd942Sad 	}
704a4e0004bSad 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
705d54aad2dSozaki-r 	if (__predict_false(ld != NULL)) {
706a4e0004bSad 		__cpu_simple_lock(&ld->ld_spinlock);
7079be065fbSchristos 		lockdebug_abort1(func, line, ld, s,
70811dc6399Sad 		    "allocation contains active lock", !cold);
70969ffbd32Smaxv 		kcov_silence_leave();
710a4e0004bSad 		return;
711a4e0004bSad 	}
712a4e0004bSad 	splx(s);
71369ffbd32Smaxv 
71469ffbd32Smaxv 	kcov_silence_leave();
71511dc6399Sad }
716ffdb4a83Schristos #endif /* _KERNEL */
717ffdb4a83Schristos 
71811dc6399Sad /*
719b07ec3fcSad  * lockdebug_dump:
720b07ec3fcSad  *
721b07ec3fcSad  *	Dump information about a lock on panic, or for DDB.
722b07ec3fcSad  */
723b07ec3fcSad static void
lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	/* LD_SLEEPER set => sleep/adaptive lock; clear => spin lock. */
	int sleeper = (ld->ld_flags & LD_SLEEPER);
	lockops_t *lo = ld->ld_lockops;
	char locksym[128], initsym[128], lockedsym[128], unlockedsym[128];

#ifdef DDB
	/*
	 * With DDB available, render the lock address and the recorded
	 * init/lock/unlock call sites as symbol+offset strings.
	 */
	db_symstr(locksym, sizeof(locksym), (db_expr_t)(intptr_t)ld->ld_lock,
	    DB_STGY_ANY);
	db_symstr(initsym, sizeof(initsym), (db_expr_t)ld->ld_initaddr,
	    DB_STGY_PROC);
	db_symstr(lockedsym, sizeof(lockedsym), (db_expr_t)ld->ld_locked,
	    DB_STGY_PROC);
	db_symstr(unlockedsym, sizeof(unlockedsym), (db_expr_t)ld->ld_unlocked,
	    DB_STGY_PROC);
#else
	/* No symbol lookup without DDB; fall back to raw hex addresses. */
	snprintf(locksym, sizeof(locksym), "%#018lx",
	    (unsigned long)ld->ld_lock);
	snprintf(initsym, sizeof(initsym), "%#018lx",
	    (unsigned long)ld->ld_initaddr);
	snprintf(lockedsym, sizeof(lockedsym), "%#018lx",
	    (unsigned long)ld->ld_locked);
	snprintf(unlockedsym, sizeof(unlockedsym), "%#018lx",
	    (unsigned long)ld->ld_unlocked);
#endif

	(*pr)(
	    "lock address : %s\n"
	    "type         : %s\n"
	    "initialized  : %s",
	    locksym, (sleeper ? "sleep/adaptive" : "spin"),
	    initsym);

#ifndef _KERNEL
	/*
	 * Outside the kernel proper (e.g. examining a crash image),
	 * ld->ld_lockops cannot be dereferenced directly: copy the
	 * lockops structure into a local buffer via db_read_bytes().
	 */
	lockops_t los;
	lo = &los;
	db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
#endif
	/*
	 * The '*' suffix below marks whichever of "last locked"/"unlocked"
	 * matches the lock's current LD_LOCKED state.
	 */
	(*pr)("\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "relevant cpu : %18u last held: %18u\n"
	    "relevant lwp : %#018lx last held: %#018lx\n"
	    "last locked%c : %s\n"
	    "unlocked%c    : %s\n",
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
	    (long)l, (long)ld->ld_lwp,
	    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
	    lockedsym,
	    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
	    unlockedsym);

#ifdef _KERNEL
	/* Let the lock type print its own internal state, if it can. */
	if (lo->lo_dump != NULL)
		(*lo->lo_dump)(ld->ld_lock, pr);

	if (sleeper) {
		/* Sleep locks may have waiters queued; show the turnstile. */
		turnstile_print(ld->ld_lock, pr);
	}
#endif
}
788b07ec3fcSad 
789ffdb4a83Schristos #ifdef _KERNEL
790b07ec3fcSad /*
7917eb6056fSad  * lockdebug_abort1:
792b07ec3fcSad  *
7937eb6056fSad  *	An error has been trapped - dump lock info and panic.
794b07ec3fcSad  */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
		 const char *msg, bool dopanic)
{
	/*
	 * Caller contract: ld->ld_spinlock is held and s is the IPL to
	 * restore; both are released on every path out of this function.
	 */

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		/* Another abort got here first; just drop our locks. */
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(curlwp, ld, printf);
	/* Release the record before panicking so it isn't left locked. */
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}
821b07ec3fcSad 
822ffdb4a83Schristos #endif /* _KERNEL */
823b07ec3fcSad #endif	/* LOCKDEBUG */
824b07ec3fcSad 
825b07ec3fcSad /*
826b07ec3fcSad  * lockdebug_lock_print:
827b07ec3fcSad  *
828b07ec3fcSad  *	Handle the DDB 'show lock' command.
829b07ec3fcSad  */
830b07ec3fcSad #ifdef DDB
void
lockdebug_lock_print(void *addr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld, lds;

	/*
	 * Walk every lockdebug record.  Each entry is first copied into
	 * a local via db_read_bytes() (debugger-safe access); 'ld' is
	 * then repointed at the copy, so TAILQ_FOREACH advances through
	 * the copied ld_achain link.
	 */
	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
		ld = &lds;
		if (ld->ld_lock == NULL)
			continue;
		/* With a NULL address argument, dump every tracked lock. */
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(curlwp, ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	char sym[128];
	uintptr_t word;

	/*
	 * No tracking state without LOCKDEBUG: read the first word of
	 * the lock itself and guess.  The aligned part is printed as a
	 * possible owner pointer, the low bits as flag bits.
	 */
	(*pr)("WARNING: lock print is unreliable without LOCKDEBUG\n");
	db_symstr(sym, sizeof(sym), (db_expr_t)(intptr_t)addr, DB_STGY_ANY);
	db_read_bytes((db_addr_t)addr, sizeof(word), (char *)&word);
	(*pr)("%s: possible owner: %p, bits: 0x%" PRIxPTR "\n", sym,
	    (void *)(word & ~(uintptr_t)ALIGNBYTES), word & ALIGNBYTES);
#endif	/* LOCKDEBUG */
}
8641d919413Sozaki-r 
865ffdb4a83Schristos #ifdef _KERNEL
866e611636fSozaki-r #ifdef LOCKDEBUG
867e611636fSozaki-r static void
868a4da18dbSad lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
869fab43db2Schristos     void (*pr)(const char *, ...) __printflike(1, 2))
870fab43db2Schristos {
8714707b092Sriastradh 	char sym[128];
872fab43db2Schristos 
8734707b092Sriastradh #ifdef DDB
8744707b092Sriastradh 	db_symstr(sym, sizeof(sym), (db_expr_t)ld->ld_initaddr, DB_STGY_PROC);
8754707b092Sriastradh #else
8764707b092Sriastradh 	snprintf(sym, sizeof(sym), "%p", (void *)ld->ld_initaddr);
877ffdb4a83Schristos #endif
878a4da18dbSad 	(*pr)("* Lock %d (initialized at %s)\n", i++, sym);
879a4da18dbSad 	lockdebug_dump(l, ld, pr);
880fab43db2Schristos }
881fab43db2Schristos 
882fab43db2Schristos static void
883fab43db2Schristos lockdebug_show_trace(const void *ptr,
884fab43db2Schristos     void (*pr)(const char *, ...) __printflike(1, 2))
885fab43db2Schristos {
886d6068b83Srin 
887fab43db2Schristos 	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
888fab43db2Schristos }
889fab43db2Schristos 
static void
lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	struct proc *p;

	/*
	 * For every LWP of every process, report the locks it holds
	 * and the (single) lock it is waiting for, if any.
	 */
	LIST_FOREACH(p, &allproc, p_list) {
		struct lwp *l;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lockdebug_t *ld;
			int i = 0;
			/* Nothing held and nothing wanted: stay quiet. */
			if (TAILQ_EMPTY(&l->l_ld_locks) &&
			    l->l_ld_wanted == NULL) {
			    	continue;
			}
			(*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
			    p->p_pid, l->l_lid,
			    l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
			if (!TAILQ_EMPTY(&l->l_ld_locks)) {
				(*pr)("\n*** Locks held: \n");
				TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
					(*pr)("\n");
					lockdebug_show_one(l, ld, i++, pr);
				}
			} else {
				(*pr)("\n*** Locks held: none\n");
			}

			/* l_ld_wanted is a single pointer: at most one. */
			if (l->l_ld_wanted != NULL) {
				(*pr)("\n*** Locks wanted: \n\n");
				lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
			} else {
				(*pr)("\n*** Locks wanted: none\n");
			}
			if (show_trace) {
				(*pr)("\n*** Traceback: \n\n");
				lockdebug_show_trace(l, pr);
				(*pr)("\n");
			}
		}
	}
}
932e611636fSozaki-r 
static void
lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	lockdebug_t *ld;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	/* Report the locks tracked against each CPU (cpu_ld_locks). */
	for (CPU_INFO_FOREACH(cii, ci)) {
		int i = 0;
		if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
			continue;
		(*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
		TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
			(*pr)("\n");
#ifdef MULTIPROCESSOR
			/* Attribute the lock to that CPU's current LWP. */
			lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
			if (show_trace)
				lockdebug_show_trace(ci->ci_curlwp, pr);
#else
			/* Uniprocessor build: curlwp is the only LWP. */
			lockdebug_show_one(curlwp, ld, i++, pr);
			if (show_trace)
				lockdebug_show_trace(curlwp, pr);
#endif
		}
	}
}
#endif /* LOCKDEBUG */
#endif	/* _KERNEL */
962e611636fSozaki-r 
963ffdb4a83Schristos #ifdef _KERNEL
964e611636fSozaki-r void
965fab43db2Schristos lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
966fab43db2Schristos     const char *modif)
967e611636fSozaki-r {
968e611636fSozaki-r #ifdef LOCKDEBUG
969e611636fSozaki-r 	bool show_trace = false;
970e611636fSozaki-r 	if (modif[0] == 't')
971e611636fSozaki-r 		show_trace = true;
972e611636fSozaki-r 
973e611636fSozaki-r 	(*pr)("[Locks tracked through LWPs]\n");
974e611636fSozaki-r 	lockdebug_show_all_locks_lwp(pr, show_trace);
975e611636fSozaki-r 	(*pr)("\n");
976e611636fSozaki-r 
977e611636fSozaki-r 	(*pr)("[Locks tracked through CPUs]\n");
978e611636fSozaki-r 	lockdebug_show_all_locks_cpu(pr, show_trace);
979e611636fSozaki-r 	(*pr)("\n");
980e611636fSozaki-r #else
981e611636fSozaki-r 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
982e611636fSozaki-r #endif	/* LOCKDEBUG */
983e611636fSozaki-r }
984e611636fSozaki-r 
9851d919413Sozaki-r void
986fab43db2Schristos lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
9871d919413Sozaki-r {
9881d919413Sozaki-r #ifdef LOCKDEBUG
9891d919413Sozaki-r 	lockdebug_t *ld;
9901d919413Sozaki-r 	void *_ld;
9911d919413Sozaki-r 	uint32_t n_null = 0;
9921d919413Sozaki-r 	uint32_t n_spin_mutex = 0;
9931d919413Sozaki-r 	uint32_t n_adaptive_mutex = 0;
9941d919413Sozaki-r 	uint32_t n_rwlock = 0;
9951d919413Sozaki-r 	uint32_t n_others = 0;
9961d919413Sozaki-r 
9971d919413Sozaki-r 	RB_TREE_FOREACH(_ld, &ld_rb_tree) {
9981d919413Sozaki-r 		ld = _ld;
9991d919413Sozaki-r 		if (ld->ld_lock == NULL) {
10001d919413Sozaki-r 			n_null++;
10011d919413Sozaki-r 			continue;
10021d919413Sozaki-r 		}
10031d919413Sozaki-r 		if (ld->ld_lockops->lo_name[0] == 'M') {
10041d919413Sozaki-r 			if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
10051d919413Sozaki-r 				n_adaptive_mutex++;
10061d919413Sozaki-r 			else
10071d919413Sozaki-r 				n_spin_mutex++;
10081d919413Sozaki-r 			continue;
10091d919413Sozaki-r 		}
10101d919413Sozaki-r 		if (ld->ld_lockops->lo_name[0] == 'R') {
10111d919413Sozaki-r 			n_rwlock++;
10121d919413Sozaki-r 			continue;
10131d919413Sozaki-r 		}
10141d919413Sozaki-r 		n_others++;
10151d919413Sozaki-r 	}
10161d919413Sozaki-r 	(*pr)(
10171d919413Sozaki-r 	    "spin mutex: %u\n"
10181d919413Sozaki-r 	    "adaptive mutex: %u\n"
10191d919413Sozaki-r 	    "rwlock: %u\n"
10201d919413Sozaki-r 	    "null locks: %u\n"
10211d919413Sozaki-r 	    "others: %u\n",
1022e0bb7e8eSad 	    n_spin_mutex, n_adaptive_mutex, n_rwlock,
10231d919413Sozaki-r 	    n_null, n_others);
10241d919413Sozaki-r #else
10251d919413Sozaki-r 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
10261d919413Sozaki-r #endif	/* LOCKDEBUG */
10271d919413Sozaki-r }
1028ffdb4a83Schristos #endif /* _KERNEL */
1029b07ec3fcSad #endif	/* DDB */
1030b07ec3fcSad 
1031ffdb4a83Schristos #ifdef _KERNEL
1032b07ec3fcSad /*
10330b3a7eb1Smrg  * lockdebug_dismiss:
10340b3a7eb1Smrg  *
10350b3a7eb1Smrg  *      The system is rebooting, and potentially from an unsafe
10360b3a7eb1Smrg  *      place so avoid any future aborts.
10370b3a7eb1Smrg  */
void
lockdebug_dismiss(void)
{

	/*
	 * Raise ld_panic so future lockdebug aborts become no-ops:
	 * the abort paths back off once this counter is nonzero.
	 */
	atomic_inc_uint_nv(&ld_panic);
}
10440b3a7eb1Smrg 
10450b3a7eb1Smrg /*
1046b07ec3fcSad  * lockdebug_abort:
1047b07ec3fcSad  *
1048b07ec3fcSad  *	An error has been trapped - dump lock info and call panic().
1049b07ec3fcSad  */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	/*
	 * If we have a lockdebug record for this lock, dump the full
	 * tracked state and panic via lockdebug_abort1(), which also
	 * releases the record's spinlock and restores the IPL 's'.
	 */
	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
			(uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) > 1)
		return;

	/* No tracked state: report what the lock itself can tell us. */
	char locksym[128];

#ifdef DDB
	db_symstr(locksym, sizeof(locksym), (db_expr_t)(intptr_t)lock,
	    DB_STGY_ANY);
#else
	snprintf(locksym, sizeof(locksym), "%#018lx", (unsigned long)lock);
#endif

	printf("%s error: %s,%zu: %s\n\n"
	    "lock address : %s\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, line, msg, locksym,
	    (int)cpu_index(curcpu()), (long)curlwp);
	(*ops->lo_dump)(lock, printf);
	printf("\n");

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}
1096ffdb4a83Schristos #endif /* _KERNEL */
1097