/*	$NetBSD: subr_lockdebug.c,v 1.69 2018/11/03 15:20:03 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.69 2018/11/03 15:20:03 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>
#include <sys/ksyms.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

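/*
 * Illustrative sketch (not compiled into the kernel): the tree above
 * orders lockdebug records by the raw address of the lock they
 * describe, so a lookup is just rb_tree_find_node() with the lock
 * pointer as the key; the comparison goes through
 * ld_rbto_compare_key().  The function name below is hypothetical.
 */
#if 0
static lockdebug_t *
example_find_record(volatile void *lock)
{

	return rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
}
#endif
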
static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (__predict_false(ld == NULL)) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}

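/*
 * Illustrative sketch (not compiled): every consumer of
 * lockdebug_lookup() below follows the same pattern - raise the SPL,
 * look the record up (it comes back with ld_spinlock held), mutate it,
 * then drop the spinlock and restore the SPL.  The function name is
 * hypothetical.
 */
#if 0
static void
example_touch_record(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	s = splhigh();
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t)__builtin_return_address(0));
	/* ... inspect or update *ld here ... */
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
#endif
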
/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
		return false;
	if (__predict_false(ld_freeptr == 0))
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (__predict_false(ld->ld_lock != NULL)) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

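/*
 * Illustrative sketch (not compiled): a lock primitive's init path, as
 * in mutex_init(), registers with lockdebug_alloc(), normally via the
 * LOCKDEBUG_ALLOC() wrapper in <sys/lockdebug.h>, which for this
 * revision is assumed to supply __func__/__LINE__.  The lockops table
 * and function name below are hypothetical.
 */
#if 0
static lockops_t example_lockops = {
	.lo_name = "example lock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

void
example_lock_init(volatile void *lock)
{
	bool dodebug;

	/* 'dodebug' records whether a debug structure was obtained. */
	dodebug = lockdebug_alloc(__func__, __LINE__, lock,
	    &example_lockops, (uintptr_t)__builtin_return_address(0));
	(void)dodebug;
}
#endif
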
/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0));
	if (__predict_false(ld == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("%s,%zu: destroying uninitialized object %p "
		    "(ld_lock=%p)", func, line, lock, ld->ld_lock);
		return;
	}
	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
	    ld->ld_shares != 0)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "is locked or in use",
		    true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

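/*
 * Illustrative sketch (not compiled): the matching destroy path, as in
 * mutex_destroy(), hands the record back through lockdebug_free()
 * (normally via the LOCKDEBUG_FREE() wrapper in <sys/lockdebug.h>).
 * The function name is hypothetical.
 */
#if 0
void
example_lock_destroy(volatile void *lock)
{

	/* Aborts if the lock is still held or was never registered. */
	lockdebug_free(__func__, __LINE__, lock);
}
#endif
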
/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || cpu_softintr_p()) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = uimin(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.  The "shared"
 *	parameter controls which ld_{ex,sh}want counter is
 *	updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared > 0)
		ld->ld_shwant++;
	else if (shared == 0)
		ld->ld_exwant++;
	if (__predict_false(recurse)) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

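/*
 * Illustrative sketch (not compiled): how a primitive's slow path might
 * announce its intent before spinning or sleeping.  The three "shared"
 * values shown match the convention documented above; the function name
 * and 'ra' plumbing are hypothetical.
 */
#if 0
void
example_wantlock(volatile void *lock, uintptr_t ra)
{

	lockdebug_wantlock(__func__, __LINE__, lock, ra, 1);  /* shared */
	lockdebug_wantlock(__func__, __LINE__, lock, ra, 0);  /* exclusive */
	lockdebug_wantlock(__func__, __LINE__, lock, ra, -1); /* count neither */
}
#endif
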
/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (__predict_false(cvlock != (void *)ld->ld_locked)) {
			lockdebug_abort1(func, line, ld, s,
			    "multiple locks used with condition variable",
			    true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (__predict_false(l->l_shlocks == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (__predict_false(ld->ld_shares == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (__predict_false(ld->ld_lwp != curlwp)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			uint16_t idx = (uint16_t)cpu_index(curcpu());
			if (__predict_false(ld->ld_cpu != idx)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

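/*
 * Illustrative sketch (not compiled): the full life cycle a lock
 * primitive drives - announce intent, record the acquire, then record
 * the release.  'cvlock' is NULL for anything that is not a condition
 * variable.  The function name is hypothetical.
 */
#if 0
void
example_exclusive_cycle(volatile void *lock)
{
	uintptr_t ra = (uintptr_t)__builtin_return_address(0);

	lockdebug_wantlock(__func__, __LINE__, lock, ra, 0);
	/* ... primitive actually acquires the lock here ... */
	lockdebug_locked(__func__, __LINE__, lock, NULL, ra, 0);
	/* ... critical section ... */
	lockdebug_unlocked(__func__, __LINE__, lock, ra, 0);
	/* ... primitive actually releases the lock here ... */
}
#endif
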
/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic ||
	    lock == (void *)&lbolt))
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (__predict_false(ld->ld_shares != 0 &&
	    !mutex_owned((kmutex_t *)ld->ld_locked))) {
		lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

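/*
 * Illustrative sketch (not compiled): the invariant lockdebug_wakeup()
 * checks - a condition variable must be signalled with the same mutex
 * held that its waiters recorded as their interlock.  The variable
 * names are hypothetical.
 */
#if 0
static kmutex_t example_mtx;
static kcondvar_t example_cv;

void
example_signal(void)
{

	mutex_enter(&example_mtx);	/* the recorded interlock */
	cv_broadcast(&example_cv);	/* wakeup check lands here */
	mutex_exit(&example_mtx);
}
#endif
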
/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified,
 *	and optionally, if we hold sleep locks.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	ld = TAILQ_FIRST(&l->l_ld_locks);
	if (__predict_false(ld != NULL)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}

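/*
 * Illustrative sketch (not compiled): code about to block asserts that
 * no spin locks are held, typically via the LOCKDEBUG_BARRIER() wrapper
 * in <sys/lockdebug.h>, which is assumed to expand to a call like the
 * one below.  The function name is hypothetical.
 */
#if 0
void
example_before_sleep(void)
{

	/* NULL: no spin lock is exempt; 1: sleep locks are permitted. */
	lockdebug_barrier(__func__, __LINE__, NULL, 1);
}
#endif
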
695b07ec3fcSad /*
69611dc6399Sad  * lockdebug_mem_check:
69711dc6399Sad  *
69811dc6399Sad  *	Check for in-use locks within a memory region that is
69938d5e341Syamt  *	being freed.
70011dc6399Sad  */
70111dc6399Sad void
7029be065fbSchristos lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
70311dc6399Sad {
70411dc6399Sad 	lockdebug_t *ld;
705a4e0004bSad 	struct cpu_info *ci;
706461cd942Sad 	int s;
70711dc6399Sad 
708d54aad2dSozaki-r 	if (__predict_false(panicstr != NULL || ld_panic))
709ea3f10f7Sad 		return;
710ea3f10f7Sad 
711a4e0004bSad 	s = splhigh();
712a4e0004bSad 	ci = curcpu();
713a4e0004bSad 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
71438d5e341Syamt 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
715461cd942Sad 	if (ld != NULL) {
716461cd942Sad 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
717461cd942Sad 
718d54aad2dSozaki-r 		if (__predict_false((uintptr_t)base > lock))
7199be065fbSchristos 			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
7209be065fbSchristos 			    func, line, ld, base, sz);
721461cd942Sad 		if (lock >= (uintptr_t)base + sz)
722461cd942Sad 			ld = NULL;
723461cd942Sad 	}
724a4e0004bSad 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
725d54aad2dSozaki-r 	if (__predict_false(ld != NULL)) {
726a4e0004bSad 		__cpu_simple_lock(&ld->ld_spinlock);
7279be065fbSchristos 		lockdebug_abort1(func, line, ld, s,
72811dc6399Sad 		    "allocation contains active lock", !cold);
729a4e0004bSad 		return;
730a4e0004bSad 	}
731a4e0004bSad 	splx(s);
73211dc6399Sad }
73311dc6399Sad 
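/*
 * Illustrative sketch (not compiled): why a single
 * rb_tree_find_node_geq() suffices above - since the tree is ordered by
 * lock address, the first record at or above 'base' is the only
 * candidate that could fall inside [base, base + sz).  Names below are
 * hypothetical.
 */
#if 0
static bool
example_region_has_lock(void *base, size_t sz)
{
	lockdebug_t *ld;

	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	return ld != NULL &&
	    (uintptr_t)ld->ld_lock < (uintptr_t)base + sz;
}
#endif
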
/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>

void
lockdebug_lock_print(void *addr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}

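/*
 * Illustrative usage from the in-kernel debugger, assuming the usual
 * ddb(4) bindings for the entry points in this file:
 *
 *	db{0}> show lock <address>	(dump one lock record)
 *	db{0}> show all locks/t		(all held locks, with stack traces)
 *	db{0}> show lockstats		(record counts by lock type)
 */
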
#ifdef LOCKDEBUG
static void
lockdebug_show_one(lockdebug_t *ld, int i,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	const char *sym;

	ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
	    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
	(*pr)("Lock %d (initialized at %s)\n", i++, sym);
	lockdebug_dump(ld, pr);
}

static void
lockdebug_show_trace(const void *ptr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{

	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
}

static void
lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	struct proc *p;

	LIST_FOREACH(p, &allproc, p_list) {
		struct lwp *l;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lockdebug_t *ld;
			int i = 0;
			if (TAILQ_EMPTY(&l->l_ld_locks))
				continue;
			(*pr)("Locks held by an LWP (%s):\n",
			    l->l_name ? l->l_name : p->p_comm);
			TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
				lockdebug_show_one(ld, i++, pr);
			}
			if (show_trace)
				lockdebug_show_trace(l, pr);
			(*pr)("\n");
		}
	}
}

static void
lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	lockdebug_t *ld;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		int i = 0;
		if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
			continue;
		(*pr)("Locks held on CPU %u:\n", ci->ci_index);
		TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
			lockdebug_show_one(ld, i++, pr);
			if (show_trace)
#ifdef MULTIPROCESSOR
				lockdebug_show_trace(ci->ci_curlwp, pr);
#else
				lockdebug_show_trace(curlwp, pr);
#endif
			(*pr)("\n");
		}
	}
}
#endif	/* LOCKDEBUG */

void
lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
    const char *modif)
{
#ifdef LOCKDEBUG
	bool show_trace = false;
	if (modif[0] == 't')
		show_trace = true;

	(*pr)("[Locks tracked through LWPs]\n");
	lockdebug_show_all_locks_lwp(pr, show_trace);
	(*pr)("\n");

	(*pr)("[Locks tracked through CPUs]\n");
	lockdebug_show_all_locks_cpu(pr, show_trace);
	(*pr)("\n");
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}

void
lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	void *_ld;
	uint32_t n_null = 0;
	uint32_t n_spin_mutex = 0;
	uint32_t n_adaptive_mutex = 0;
	uint32_t n_rwlock = 0;
	uint32_t n_cv = 0;
	uint32_t n_others = 0;

	RB_TREE_FOREACH(_ld, &ld_rb_tree) {
		ld = _ld;
		if (ld->ld_lock == NULL) {
			n_null++;
			continue;
		}
		if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
			n_cv++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'M') {
			if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
				n_adaptive_mutex++;
			else
				n_spin_mutex++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'R') {
			n_rwlock++;
			continue;
		}
		n_others++;
	}
	(*pr)(
	    "condvar: %u\n"
	    "spin mutex: %u\n"
	    "adaptive mutex: %u\n"
	    "rwlock: %u\n"
	    "null locks: %u\n"
	    "others: %u\n",
	    n_cv, n_spin_mutex, n_adaptive_mutex, n_rwlock,
	    n_null, n_others);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_dismiss:
 *
 *	The system is rebooting, potentially from an unsafe place,
 *	so avoid any future aborts.
 */
void
lockdebug_dismiss(void)
{

	atomic_inc_uint_nv(&ld_panic);
}

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) > 1)
		return;

	printf_nolog("%s error: %s,%zu: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, line, msg, (long)lock,
	    (int)cpu_index(curcpu()), (long)curlwp);
	(*ops->lo_dump)(lock);
	printf_nolog("\n");

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}