/*	$NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
				 const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL) {
		panic("lockdebug_lookup: uninitialized lock "
		    "(lock=%p, from=%08"PRIxPTR")", lock, where);
	}
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table ld %p", ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		/* ld is NULL here, so it must not be dereferenced. */
		panic("lockdebug_free: destroying uninitialized object %p",
		    lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}
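
/*
 * Example lifecycle (a sketch, not code from this file): a lock
 * primitive's init/destroy path pairs lockdebug_alloc() with
 * lockdebug_free(), normally via the LOCKDEBUG_ALLOC()/LOCKDEBUG_FREE()
 * wrappers from <sys/lockdebug.h>.  "lk" and "my_lockops" are
 * illustrative names, not definitions from this file:
 *
 *	bool dodebug;
 *
 *	dodebug = LOCKDEBUG_ALLOC(lk, &my_lockops,
 *	    (uintptr_t)__builtin_return_address(0));
 *	...
 *	LOCKDEBUG_FREE(dodebug, lk);
 */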

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
		 int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
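
/*
 * Sketch of the acquire-side protocol from a lock primitive's point of
 * view ("lk" is an illustrative lock, not something defined here).  The
 * wantlock/locked pair brackets the real acquisition:
 *
 *	uintptr_t ra = (uintptr_t)__builtin_return_address(0);
 *
 *	lockdebug_wantlock(lk, ra, 0);      // announce intent; catches
 *	                                    // recursion on the same lock
 *	... take the underlying lock ...
 *	lockdebug_locked(lk, NULL, ra, 0);  // record the new owner
 *
 * The cvlock argument is non-NULL only for condition variables, where
 * it names the mutex passed to cv_wait() so that all waiters can be
 * checked for using the same interlock.
 */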

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
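
/*
 * The release side mirrors lockdebug_locked(): the primitive calls this
 * with the same "shared" value it used at acquire time, typically just
 * before dropping the underlying lock (a sketch; "lk" is illustrative):
 *
 *	lockdebug_unlocked(lk, (uintptr_t)__builtin_return_address(0), 0);
 */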

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
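
/*
 * The invariant verified above, seen from a caller (a sketch; "cv" and
 * "mtx" are illustrative names): every waiter must have used the same
 * mutex, and that mutex must be held when waking.
 *
 *	mutex_enter(&mtx);
 *	cv_wait(&cv, &mtx);	// records &mtx as the CV's interlock
 *	mutex_exit(&mtx);
 *
 *	mutex_enter(&mtx);	// the same mutex must be held here...
 *	cv_broadcast(&cv);	// ...or lockdebug_wakeup() aborts
 *	mutex_exit(&mtx);
 */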

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and,
 *	unless the caller permits them, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(ld, printf);
		}
		panic("%s: holding %d shared locks", __func__, l->l_shlocks);
	}
}
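
/*
 * Typical use (a sketch): code that is about to sleep asserts that it
 * holds no spin locks, and optionally no sleep locks either.  The first
 * argument exempts one spin lock that the caller knowingly holds ("lk"
 * is illustrative):
 *
 *	lockdebug_barrier(NULL, 0);	// must hold no locks at all
 *	lockdebug_barrier(lk, 1);	// spin lock lk and sleep locks OK
 */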

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
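
/*
 * Allocator integration (a sketch): a free path hands over the region
 * being released so that a lock left initialized inside it is caught
 * before the memory is recycled; "p" and "size" are illustrative, and
 * in-tree callers normally go through the LOCKDEBUG_MEM_CHECK() wrapper:
 *
 *	lockdebug_mem_check(__func__, p, size);
 */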

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s: %s", ld->ld_lockops->lo_name,
		    func, msg);
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */
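
/*
 * From DDB this is reached via the 'show lock' command (a sketch; the
 * prompt and address are illustrative):
 *
 *	db> show lock 0xffffc1001d2f5500
 *
 * Called with a NULL address, it dumps every active lockdebug record.
 */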

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock,
			(uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain on the first occurrence only.  Otherwise proceed to
	 * panic, where we will `rendezvous' with other CPUs if the
	 * machine is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error: %s: %s: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, msg, lock, cpu_index(curcpu()), curlwp);
}