1 /*	$NetBSD: subr_lockdebug.c,v 1.34 2008/05/06 18:40:57 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Basic lock debugging code shared among lock primitives.
34  */
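
/*
 * Overview: every lock registered with this module is described by a
 * lockdebug_t record, and all records are kept in a global red-black tree
 * keyed on the lock address, so a record can be found from the lock
 * pointer alone.  Lock primitives call lockdebug_alloc()/lockdebug_free()
 * when a lock is created or destroyed and lockdebug_wantlock(),
 * lockdebug_locked() and lockdebug_unlocked() around acquire and release,
 * which lets common errors (locking against oneself, releasing a lock that
 * is not held, sleeping with spin locks held, freeing memory that still
 * contains a lock) be detected and reported via lockdebug_abort1().
 */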
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.34 2008/05/06 18:40:57 ad Exp $");
38 
39 #include "opt_ddb.h"
40 
41 #include <sys/param.h>
42 #include <sys/proc.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/kmem.h>
46 #include <sys/lockdebug.h>
47 #include <sys/sleepq.h>
48 #include <sys/cpu.h>
49 #include <sys/atomic.h>
50 #include <sys/lock.h>
51 
52 #include <lib/libkern/rb.h>
53 
54 #include <machine/lock.h>
55 
56 unsigned int		ld_panic;
57 
58 #ifdef LOCKDEBUG
59 
60 #define	LD_BATCH_SHIFT	9
61 #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
62 #define	LD_BATCH_MASK	(LD_BATCH - 1)
63 #define	LD_MAX_LOCKS	1048576
64 #define	LD_SLOP		16
65 
66 #define	LD_LOCKED	0x01
67 #define	LD_SLEEPER	0x02
68 
69 #define	LD_WRITE_LOCK	0x80000000
70 
71 typedef struct lockdebug {
72 	struct rb_node	ld_rb_node;	/* must be the first member */
73 	__cpu_simple_lock_t ld_spinlock;
74 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
75 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
76 	volatile void	*ld_lock;	/* lock being traced */
77 	lockops_t	*ld_lockops;	/* description of the lock type */
78 	struct lwp	*ld_lwp;	/* current (or last) owner LWP */
79 	uintptr_t	ld_locked;	/* call site of last acquire */
80 	uintptr_t	ld_unlocked;	/* call site of last release */
81 	uintptr_t	ld_initaddr;	/* call site of initialization */
82 	uint16_t	ld_shares;	/* number of shared holds */
83 	uint16_t	ld_cpu;		/* CPU of current (or last) holder */
84 	uint8_t		ld_flags;	/* LD_LOCKED, LD_SLEEPER */
85 	uint8_t		ld_shwant;	/* advisory */
86 	uint8_t		ld_exwant;	/* advisory */
87 	uint8_t		ld_unused;
88 } volatile lockdebug_t;
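
/*
 * While a lock is held, its lockdebug_t is linked via ld_chain onto the
 * owning LWP's l_ld_locks list (sleep locks) or the owning CPU's
 * cpu_ld_locks list (spin locks); unused records sit on ld_free instead.
 * ld_achain links every record onto ld_all for the DDB "show lock"
 * command.
 */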
89 
90 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
91 
92 __cpu_simple_lock_t	ld_mod_lk;	/* serializes changes to the lists */
93 lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free); /* free records */
94 lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);   /* all records */
95 int			ld_nfree;	/* number of records on ld_free */
96 int			ld_freeptr;	/* batch allocation pointer */
97 int			ld_recurse;
98 bool			ld_nomore;	/* LD_MAX_LOCKS reached; stop growing */
99 lockdebug_t		ld_prime[LD_BATCH];	/* statically allocated batch */
100 
101 static void	lockdebug_abort1(lockdebug_t *, int, const char *,
102 				 const char *, bool);
103 static int	lockdebug_more(int);
104 static void	lockdebug_init(void);
105 
106 static signed int
107 ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
108 {
109 	const lockdebug_t *ld1 = (const void *)n1;
110 	const lockdebug_t *ld2 = (const void *)n2;
111 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
112 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
113 
114 	if (a < b)
115 		return 1;
116 	if (a > b)
117 		return -1;
118 	return 0;
119 }
120 
121 static signed int
122 ld_rb_compare_key(const struct rb_node *n, const void *key)
123 {
124 	const lockdebug_t *ld = (const void *)n;
125 	const uintptr_t a = (uintptr_t)ld->ld_lock;
126 	const uintptr_t b = (uintptr_t)key;
127 
128 	if (a < b)
129 		return 1;
130 	if (a > b)
131 		return -1;
132 	return 0;
133 }
134 
135 static struct rb_tree ld_rb_tree;
136 
137 static const struct rb_tree_ops ld_rb_tree_ops = {
138 	.rb_compare_nodes = ld_rb_compare_nodes,
139 	.rb_compare_key = ld_rb_compare_key,
140 };
141 
142 static inline lockdebug_t *
143 lockdebug_lookup1(volatile void *lock)
144 {
145 	lockdebug_t *ld;
146 	struct cpu_info *ci;
147 
148 	ci = curcpu();
149 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
150 	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
151 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
152 	if (ld == NULL) {
153 		return NULL;
154 	}
155 	__cpu_simple_lock(&ld->ld_spinlock);
156 
157 	return ld;
158 }
159 
160 static void
161 lockdebug_lock_cpus(void)
162 {
163 	CPU_INFO_ITERATOR cii;
164 	struct cpu_info *ci;
165 
166 	for (CPU_INFO_FOREACH(cii, ci)) {
167 		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
168 	}
169 }
170 
171 static void
172 lockdebug_unlock_cpus(void)
173 {
174 	CPU_INFO_ITERATOR cii;
175 	struct cpu_info *ci;
176 
177 	for (CPU_INFO_FOREACH(cii, ci)) {
178 		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
179 	}
180 }
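
/*
 * Tree locking protocol: a lookup takes only the current CPU's
 * cpu_ld_lock while it searches the tree, whereas insertion and removal
 * (lockdebug_alloc/lockdebug_free) take every CPU's cpu_ld_lock via
 * lockdebug_lock_cpus(), so tree updates exclude all concurrent lookups.
 * Each record is then protected by its own ld_spinlock.
 */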
181 
182 /*
183  * lockdebug_lookup:
184  *
185  *	Find a lockdebug structure by a pointer to a lock and return it locked.
186  */
187 static inline lockdebug_t *
188 lockdebug_lookup(volatile void *lock)
189 {
190 	lockdebug_t *ld;
191 
192 	ld = lockdebug_lookup1(lock);
193 	if (ld == NULL)
194 		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
195 	return ld;
196 }
197 
198 /*
199  * lockdebug_init:
200  *
201  *	Initialize the lockdebug system.  Allocate an initial pool of
202  *	lockdebug structures before the VM system is up and running.
203  */
204 static void
205 lockdebug_init(void)
206 {
207 	lockdebug_t *ld;
208 	int i;
209 
210 	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
211 	TAILQ_INIT(&curlwp->l_ld_locks);
212 	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
213 	__cpu_simple_lock_init(&ld_mod_lk);
214 
215 	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
216 
217 	ld = ld_prime;
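	/*
	 * Entry 0 of the static prime batch is not used: only
	 * LD_BATCH - 1 records go onto the free list and ld_freeptr
	 * is set to 1.
	 */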
218 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
219 		__cpu_simple_lock_init(&ld->ld_spinlock);
220 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
221 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
222 	}
223 	ld_freeptr = 1;
224 	ld_nfree = LD_BATCH - 1;
225 }
226 
227 /*
228  * lockdebug_alloc:
229  *
230  *	A lock is being initialized, so allocate an associated debug
231  *	structure.
232  */
233 bool
234 lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
235 {
236 	struct cpu_info *ci;
237 	lockdebug_t *ld;
238 	int s;
239 
240 	if (lo == NULL || panicstr != NULL || ld_panic)
241 		return false;
242 	if (ld_freeptr == 0)
243 		lockdebug_init();
244 
245 	s = splhigh();
246 	__cpu_simple_lock(&ld_mod_lk);
247 	if ((ld = lockdebug_lookup1(lock)) != NULL) {
248 		__cpu_simple_unlock(&ld_mod_lk);
249 		lockdebug_abort1(ld, s, __func__, "already initialized", true);
250 		return false;
251 	}
252 
253 	/*
254 	 * Pinch a new debug structure.  We may recurse because we call
255 	 * kmem_alloc(), which may need to initialize new locks somewhere
256 	 * down the path.  If not recursing, we try to maintain at least
257 	 * LD_SLOP structures free, which should hopefully be enough to
258 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
259 	 * worry: we'll just return false and the lock won't be tracked.
260 	 */
261 	ci = curcpu();
262 	ci->ci_lkdebug_recurse++;
263 	if (TAILQ_EMPTY(&ld_free)) {
264 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
265 			ci->ci_lkdebug_recurse--;
266 			__cpu_simple_unlock(&ld_mod_lk);
267 			splx(s);
268 			return false;
269 		}
270 		s = lockdebug_more(s);
271 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
272 		s = lockdebug_more(s);
273 	}
274 	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
275 		__cpu_simple_unlock(&ld_mod_lk);
276 		splx(s);
277 		return false;
278 	}
279 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
280 	ld_nfree--;
281 	ci->ci_lkdebug_recurse--;
282 
283 	if (ld->ld_lock != NULL) {
284 		panic("lockdebug_alloc: corrupt table");
285 	}
286 
287 	/* Initialise the structure. */
288 	ld->ld_lock = lock;
289 	ld->ld_lockops = lo;
290 	ld->ld_locked = 0;
291 	ld->ld_unlocked = 0;
292 	ld->ld_lwp = NULL;
293 	ld->ld_initaddr = initaddr;
294 	ld->ld_flags = lo->lo_sleeplock ? LD_SLEEPER : 0;
295 	lockdebug_lock_cpus();
296 	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
297 	lockdebug_unlock_cpus();
298 	__cpu_simple_unlock(&ld_mod_lk);
299 
300 	splx(s);
301 	return true;
302 }
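
/*
 * Illustrative sketch only (not part of this file): a lock primitive would
 * typically register a lock at initialization time along these lines.  The
 * "foolock"/"fl_" names are hypothetical; the lockops_t fields shown are
 * the ones this file makes use of.
 *
 *	static lockops_t foolock_lockops = {
 *		.lo_name = "foolock",
 *		.lo_sleeplock = 0,
 *		.lo_dump = foolock_dump,
 *	};
 *
 *	void
 *	foolock_init(struct foolock *fl)
 *	{
 *		fl->fl_debug = lockdebug_alloc(fl, &foolock_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 *
 * If lockdebug_alloc() returns false, the caller simply skips the remaining
 * lockdebug_*() hooks for that lock.
 */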
303 
304 /*
305  * lockdebug_free:
306  *
307  *	A lock is being destroyed, so release debugging resources.
308  */
309 void
310 lockdebug_free(volatile void *lock)
311 {
312 	lockdebug_t *ld;
313 	int s;
314 
315 	if (panicstr != NULL || ld_panic)
316 		return;
317 
318 	s = splhigh();
319 	__cpu_simple_lock(&ld_mod_lk);
320 	ld = lockdebug_lookup(lock);
321 	if (ld == NULL) {
322 		__cpu_simple_unlock(&ld_mod_lk);
323 		splx(s);
324 		panic("lockdebug_free: destroying uninitialized lock %p",
325 		    lock);
326 		return;
327 	}
328 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
329 		__cpu_simple_unlock(&ld_mod_lk);
330 		lockdebug_abort1(ld, s, __func__, "is locked", true);
331 		return;
332 	}
333 	lockdebug_lock_cpus();
334 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
335 	lockdebug_unlock_cpus();
336 	ld->ld_lock = NULL;
337 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
338 	ld_nfree++;
339 	__cpu_simple_unlock(&ld->ld_spinlock);
340 	__cpu_simple_unlock(&ld_mod_lk);
341 	splx(s);
342 }
343 
344 /*
345  * lockdebug_more:
346  *
347  *	Allocate a batch of debug structures and add to the free list.
348  *	Must be called with ld_mod_lk held.
349  */
350 static int
351 lockdebug_more(int s)
352 {
353 	lockdebug_t *ld;
354 	void *block;
355 	int i, base, m;
356 
357 	while (ld_nfree < LD_SLOP) {
358 		__cpu_simple_unlock(&ld_mod_lk);
359 		splx(s);
360 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
361 		s = splhigh();
362 		__cpu_simple_lock(&ld_mod_lk);
363 
364 		if (block == NULL)
365 			return s;
366 
367 		if (ld_nfree > LD_SLOP) {
368 			/* Somebody beat us to it. */
369 			__cpu_simple_unlock(&ld_mod_lk);
370 			splx(s);
371 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
372 			s = splhigh();
373 			__cpu_simple_lock(&ld_mod_lk);
374 			continue;
375 		}
376 
377 		base = ld_freeptr;
378 		ld_nfree += LD_BATCH;
379 		ld = block;
380 		base <<= LD_BATCH_SHIFT;
381 		m = min(LD_MAX_LOCKS, base + LD_BATCH);
382 
383 		if (m == LD_MAX_LOCKS)
384 			ld_nomore = true;
385 
386 		for (i = base; i < m; i++, ld++) {
387 			__cpu_simple_lock_init(&ld->ld_spinlock);
388 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
389 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
390 		}
391 
392 		membar_producer();
393 	}
394 
395 	return s;
396 }
397 
398 /*
399  * lockdebug_wantlock:
400  *
401  *	Process the preamble to a lock acquire.
402  */
403 void
404 lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
405 		   bool trylock)
406 {
407 	struct lwp *l = curlwp;
408 	lockdebug_t *ld;
409 	bool recurse;
410 	int s;
411 
412 	(void)shared;
413 	recurse = false;
414 
415 	if (panicstr != NULL || ld_panic)
416 		return;
417 
418 	s = splhigh();
419 	if ((ld = lockdebug_lookup(lock)) == NULL) {
420 		splx(s);
421 		return;
422 	}
423 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
424 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
425 			if (ld->ld_lwp == l && !(shared && trylock))
426 				recurse = true;
427 		} else if (ld->ld_cpu == (uint16_t)cpu_number())
428 			recurse = true;
429 	}
430 	if (cpu_intr_p()) {
431 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
432 			lockdebug_abort1(ld, s, __func__,
433 			    "acquiring sleep lock from interrupt context",
434 			    true);
435 			return;
436 		}
437 	}
438 	if (shared)
439 		ld->ld_shwant++;
440 	else
441 		ld->ld_exwant++;
442 	if (recurse) {
443 		lockdebug_abort1(ld, s, __func__, "locking against myself",
444 		    true);
445 		return;
446 	}
447 	__cpu_simple_unlock(&ld->ld_spinlock);
448 	splx(s);
449 }
450 
451 /*
452  * lockdebug_locked:
453  *
454  *	Process a lock acquire operation.
455  */
456 void
457 lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
458 {
459 	struct lwp *l = curlwp;
460 	lockdebug_t *ld;
461 	int s;
462 
463 	if (panicstr != NULL || ld_panic)
464 		return;
465 
466 	s = splhigh();
467 	if ((ld = lockdebug_lookup(lock)) == NULL) {
468 		splx(s);
469 		return;
470 	}
471 	if (shared) {
472 		l->l_shlocks++;
473 		ld->ld_shares++;
474 		ld->ld_shwant--;
475 	} else {
476 		if ((ld->ld_flags & LD_LOCKED) != 0) {
477 			lockdebug_abort1(ld, s, __func__, "already locked",
478 			    true);
479 			return;
480 		}
481 		ld->ld_flags |= LD_LOCKED;
482 		ld->ld_locked = where;
483 		ld->ld_exwant--;
484 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
485 			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
486 		} else {
487 			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
488 			    ld, ld_chain);
489 		}
490 	}
491 	ld->ld_cpu = (uint16_t)cpu_number();
492 	ld->ld_lwp = l;
493 	__cpu_simple_unlock(&ld->ld_spinlock);
494 	splx(s);
495 }
496 
497 /*
498  * lockdebug_unlocked:
499  *
500  *	Process a lock release operation.
501  */
502 void
503 lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
504 {
505 	struct lwp *l = curlwp;
506 	lockdebug_t *ld;
507 	int s;
508 
509 	if (panicstr != NULL || ld_panic)
510 		return;
511 
512 	s = splhigh();
513 	if ((ld = lockdebug_lookup(lock)) == NULL) {
514 		splx(s);
515 		return;
516 	}
517 	if (shared) {
518 		if (l->l_shlocks == 0) {
519 			lockdebug_abort1(ld, s, __func__,
520 			    "no shared locks held by LWP", true);
521 			return;
522 		}
523 		if (ld->ld_shares == 0) {
524 			lockdebug_abort1(ld, s, __func__,
525 			    "no shared holds on this lock", true);
526 			return;
527 		}
528 		l->l_shlocks--;
529 		ld->ld_shares--;
530 		if (ld->ld_lwp == l)
531 			ld->ld_lwp = NULL;
532 		if (ld->ld_cpu == (uint16_t)cpu_number())
533 			ld->ld_cpu = (uint16_t)-1;
534 	} else {
535 		if ((ld->ld_flags & LD_LOCKED) == 0) {
536 			lockdebug_abort1(ld, s, __func__, "not locked", true);
537 			return;
538 		}
539 
540 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
541 			if (ld->ld_lwp != curlwp) {
542 				lockdebug_abort1(ld, s, __func__,
543 				    "not held by current LWP", true);
544 				return;
545 			}
546 			ld->ld_flags &= ~LD_LOCKED;
547 			ld->ld_unlocked = where;
548 			ld->ld_lwp = NULL;
549 			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
550 		} else {
551 			if (ld->ld_cpu != (uint16_t)cpu_number()) {
552 				lockdebug_abort1(ld, s, __func__,
553 				    "not held by current CPU", true);
554 				return;
555 			}
556 			ld->ld_flags &= ~LD_LOCKED;
557 			ld->ld_unlocked = where;
558 			ld->ld_lwp = NULL;
559 			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
560 			    ld_chain);
561 		}
562 	}
563 	__cpu_simple_unlock(&ld->ld_spinlock);
564 	splx(s);
565 }
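
/*
 * Illustrative call order (hypothetical caller, not part of this file):
 * around an acquire/release cycle a primitive invokes the hooks roughly as
 * follows, passing its caller's return address as "where":
 *
 *	lockdebug_wantlock(lock, where, shared, trylock);
 *	... acquire the lock ...
 *	lockdebug_locked(lock, where, shared);
 *	... critical section ...
 *	lockdebug_unlocked(lock, where, shared);
 *	... release the lock ...
 */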
566 
567 /*
568  * lockdebug_barrier:
569  *
570  *	Panic if we hold any spin lock other than the one specified and,
571  *	unless the caller permits it, if we hold any sleep locks.
572  */
573 void
574 lockdebug_barrier(volatile void *spinlock, int slplocks)
575 {
576 	struct lwp *l = curlwp;
577 	lockdebug_t *ld;
578 	int s;
579 
580 	if (panicstr != NULL || ld_panic)
581 		return;
582 
583 	s = splhigh();
584 	if ((l->l_pflag & LP_INTR) == 0) {
585 		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
586 			if (ld->ld_lock == spinlock) {
587 				continue;
588 			}
589 			__cpu_simple_lock(&ld->ld_spinlock);
590 			lockdebug_abort1(ld, s, __func__,
591 			    "spin lock held", true);
592 			return;
593 		}
594 	}
595 	if (slplocks) {
596 		splx(s);
597 		return;
598 	}
599 	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
600 		__cpu_simple_lock(&ld->ld_spinlock);
601 		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
602 		return;
603 	}
604 	splx(s);
605 	if (l->l_shlocks != 0) {
606 		panic("lockdebug_barrier: holding %d shared locks",
607 		    l->l_shlocks);
608 	}
609 }
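
/*
 * Callers (normally via the LOCKDEBUG_BARRIER() macro) invoke this at
 * points where no locks should be held, e.g. before an LWP may block:
 * the first argument names the one spin lock that is allowed to still be
 * held, or NULL, and a non-zero second argument means that holding sleep
 * locks is acceptable here.
 */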
610 
611 /*
612  * lockdebug_mem_check:
613  *
614  *	Check for in-use locks within a memory region that is
615  *	being freed.
616  */
617 void
618 lockdebug_mem_check(const char *func, void *base, size_t sz)
619 {
620 	lockdebug_t *ld;
621 	struct cpu_info *ci;
622 	int s;
623 
624 	if (panicstr != NULL || ld_panic)
625 		return;
626 
627 	s = splhigh();
628 	ci = curcpu();
629 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
630 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
631 	if (ld != NULL) {
632 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
633 
634 		if ((uintptr_t)base > lock)
635 			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
636 			    __func__, ld, base, sz);
637 		if (lock >= (uintptr_t)base + sz)
638 			ld = NULL;
639 	}
640 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
641 	if (ld != NULL) {
642 		__cpu_simple_lock(&ld->ld_spinlock);
643 		lockdebug_abort1(ld, s, func,
644 		    "allocation contains active lock", !cold);
645 		return;
646 	}
647 	splx(s);
648 }
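
/*
 * Memory allocators are expected to call this when freeing a region (the
 * kmem(9) allocator does so in LOCKDEBUG kernels, for example) so that
 * freeing memory which still contains an initialized lock is caught
 * immediately instead of surfacing later as tree corruption.
 */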
649 
650 /*
651  * lockdebug_dump:
652  *
653  *	Dump information about a lock on panic, or for DDB.
654  */
655 static void
656 lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
657 {
658 	int sleeper = (ld->ld_flags & LD_SLEEPER);
659 
660 	(*pr)(
661 	    "lock address : %#018lx type     : %18s\n"
662 	    "shared holds : %18u exclusive: %18u\n"
663 	    "shares wanted: %18u exclusive: %18u\n"
664 	    "current cpu  : %18u last held: %18u\n"
665 	    "current lwp  : %#018lx last held: %#018lx\n"
666 	    "last locked  : %#018lx unlocked : %#018lx\n"
667 	    "initialized  : %#018lx\n",
668 	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
669 	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
670 	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
671 	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
672 	    (long)curlwp, (long)ld->ld_lwp,
673 	    (long)ld->ld_locked, (long)ld->ld_unlocked,
674 	    (long)ld->ld_initaddr);
675 
676 	if (ld->ld_lockops->lo_dump != NULL)
677 		(*ld->ld_lockops->lo_dump)(ld->ld_lock);
678 
679 	if (sleeper) {
680 		(*pr)("\n");
681 		turnstile_print(ld->ld_lock, pr);
682 	}
683 }
684 
685 /*
686  * lockdebug_abort1:
687  *
688  *	An error has been trapped - dump lock info and panic.
689  */
690 static void
691 lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
692 		 const char *msg, bool dopanic)
693 {
694 
695 	/*
696 	 * Don't make the situation worse if the system is already going
697 	 * down in flames.  Once a panic is triggered, lockdebug state
698 	 * becomes stale and cannot be trusted.
699 	 */
700 	if (atomic_inc_uint_nv(&ld_panic) != 1) {
701 		__cpu_simple_unlock(&ld->ld_spinlock);
702 		splx(s);
703 		return;
704 	}
705 
706 	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
707 	    func, msg);
708 	lockdebug_dump(ld, printf_nolog);
709 	__cpu_simple_unlock(&ld->ld_spinlock);
710 	splx(s);
711 	printf_nolog("\n");
712 	if (dopanic)
713 		panic("LOCKDEBUG");
714 }
715 
716 #endif	/* LOCKDEBUG */
717 
718 /*
719  * lockdebug_lock_print:
720  *
721  *	Handle the DDB 'show lock' command.
722  */
723 #ifdef DDB
724 void
725 lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
726 {
727 #ifdef LOCKDEBUG
728 	lockdebug_t *ld;
729 
730 	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
731 		if (ld->ld_lock == addr) {
732 			lockdebug_dump(ld, pr);
733 			return;
734 		}
735 	}
736 	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
737 #else
738 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
739 #endif	/* LOCKDEBUG */
740 }
741 #endif	/* DDB */
742 
743 /*
744  * lockdebug_abort:
745  *
746  *	An error has been trapped - dump lock info and call panic().
747  */
748 void
749 lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
750 		const char *msg)
751 {
752 #ifdef LOCKDEBUG
753 	lockdebug_t *ld;
754 	int s;
755 
756 	s = splhigh();
757 	if ((ld = lockdebug_lookup(lock)) != NULL) {
758 		lockdebug_abort1(ld, s, func, msg, true);
759 		return;
760 	}
761 	splx(s);
762 #endif	/* LOCKDEBUG */
763 
764 	/*
765 	 * Complain only on the first occurrence.  Otherwise proceed to
766 	 * panic, where we will `rendezvous' with other CPUs if the machine
767 	 * is going down in flames.
768 	 */
769 	if (atomic_inc_uint_nv(&ld_panic) == 1) {
770 		printf_nolog("%s error: %s: %s\n\n"
771 		    "lock address : %#018lx\n"
772 		    "current cpu  : %18d\n"
773 		    "current lwp  : %#018lx\n",
774 		    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
775 		    (long)curlwp);
776 		(*ops->lo_dump)(lock);
777 		printf_nolog("\n");
778 	}
779 
780 	panic("lock error");
781 }
782