/*	$NetBSD: subr_lockdebug.c,v 1.8 2007/06/15 20:17:08 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
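
/*
 * Each tracked lock is assigned a small integer ID by lockdebug_alloc().
 * The ID indexes into ld_table[], which holds pointers to batches of
 * LD_BATCH lockdebug structures; a statically allocated initial batch
 * (ld_prime) allows tracking to begin before kmem_alloc() is usable.
 *
 * A lock primitive drives this facility roughly as follows (a sketch
 * only; real consumers normally go through the LOCKDEBUG_* wrapper
 * macros in <sys/lockdebug.h>):
 *
 *	id = lockdebug_alloc(lock, ops);	at initialization
 *	lockdebug_wantlock(id, where, shared);	before acquire
 *	lockdebug_locked(id, where, shared);	after acquire
 *	lockdebug_unlocked(id, where, shared);	before release
 *	lockdebug_free(lock, id);		at destruction
 */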

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.8 2007/06/15 20:17:08 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>

#include <machine/cpu.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_NOID		(LD_MAX_LOCKS + 1)

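/*
 * Each global debug lock is padded out and aligned to 64 bytes
 * (presumably the cache line size on the machines of interest), so
 * that each one occupies its own cache line and CPUs working on
 * different locks do not falsely share a line.
 */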
typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	u_int		ld_id;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

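/*
 * Global state: held sleep locks and held spin locks live on separate
 * lists, each guarded by its own debug lock.  ld_free and ld_nfree
 * track the pool of unused structures, and ld_all links every
 * structure for the benefit of DDB.  ld_table maps an ID's batch
 * number to the array holding that batch, with ld_prime serving as
 * the statically allocated batch 0.
 */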
lockdebuglk_t		ld_sleeper_lk;
lockdebuglk_t		ld_spinner_lk;
lockdebuglk_t		ld_free_lk;

lockdebuglist_t		ld_sleepers;
lockdebuglist_t		ld_spinners;
lockdebuglist_t		ld_free;
lockdebuglist_t		ld_all;
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

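/*
 * Raise the SPL to splhigh() before taking a debug lock, so that the
 * holder cannot be interrupted while the lock is held; the saved SPL
 * is stashed in the lock itself and restored by lockdebug_unlock().
 */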
static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&(lk->lk_lock));
	splx(s);
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	if (id == LD_NOID)
		return NULL;

	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);

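	/*
	 * The ID encodes a batch number in its upper bits and an
	 * index into that batch in its low LD_BATCH_SHIFT bits.
	 */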
	base = ld_table[id >> LD_BATCH_SHIFT];
	ld = base + (id & LD_BATCH_MASK);

	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	TAILQ_INIT(&ld_free);
	TAILQ_INIT(&ld_all);
	TAILQ_INIT(&ld_sleepers);
	TAILQ_INIT(&ld_spinners);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo)
{
	struct cpu_info *ci;
	lockdebug_t *ld;

	if (lo == NULL || panicstr != NULL)
		return LD_NOID;
	if (ld_freeptr == 0)
		lockdebug_init();

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return LD_NOID;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return LD_NOID;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return ld->ld_id;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock, u_int id)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (ld->ld_lock != lock) {
		printf_nolog("lockdebug_free: destroying uninitialized lock "
		    "%p (ld_id=%d ld_lock=%p)\n", lock, id, ld->ld_lock);
		lockdebug_abort1(ld, lk, __func__, "lock record follows");
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked");

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			ld->ld_id = i;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

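		/*
		 * Issue a write barrier so that the batch is fully
		 * initialized before it is published in ld_table,
		 * where lockdebug_lookup() may find it without
		 * taking ld_free_lk.
		 */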
		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself");

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked");

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP");
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock");
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked");

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}
/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified,
 *	and optionally (if slplocks is false) if we hold sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

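	/*
	 * Walk the held spin locks: the specified lock (if any) must
	 * be held by this CPU, and any other spin lock held by this
	 * CPU is an error.
	 */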
	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU");
				continue;
			}
			if (ld->ld_cpu == cpuno)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held");
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held");
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump information about the lock
 *	and then panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
		const char *func, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}
668