/*	$NetBSD: subr_lockdebug.c,v 1.13 2007/11/11 23:22:24 matt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.13 2007/11/11 23:22:24 matt Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02
#define	LD_MLOCKS	8
#define	LD_MLISTS	8192

#define	LD_NOID		(LD_MAX_LOCKS + 1)

typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_mchain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	u_int		ld_id;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_sleeper_lk = { .lk_lock = SIMPLELOCK_INITIALIZER };
lockdebuglk_t	ld_spinner_lk = { .lk_lock = SIMPLELOCK_INITIALIZER };
lockdebuglk_t	ld_free_lk = { .lk_lock = SIMPLELOCK_INITIALIZER };
lockdebuglk_t	ld_mem_lk[LD_MLOCKS];

lockdebuglist_t	ld_mem_list[LD_MLISTS];
lockdebuglist_t	ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t	ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t	ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t	ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&(lk->lk_lock));
	splx(s);
}

static inline void
lockdebug_mhash(volatile void *addr, lockdebuglk_t **lk, lockdebuglist_t **head)
{
	u_int hash;

	hash = (uintptr_t)addr >> PGSHIFT;
	*lk = &ld_mem_lk[hash & (LD_MLOCKS - 1)];
	*head = &ld_mem_list[hash & (LD_MLISTS - 1)];
	lockdebug_lock(*lk);
}
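
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * every global list and hash bucket above is protected by the
 * lockdebug_lock()/lockdebug_unlock() pattern, which raises the SPL, takes
 * the per-list simple lock, and later restores the saved SPL.  The
 * "example_lk", "example_list" and example_insert() names are hypothetical.
 */
#if 0
static lockdebuglk_t example_lk = { .lk_lock = SIMPLELOCK_INITIALIZER };
static lockdebuglist_t example_list = TAILQ_HEAD_INITIALIZER(example_list);

static void
example_insert(lockdebug_t *ld)
{

	lockdebug_lock(&example_lk);			/* splhigh + spin */
	TAILQ_INSERT_TAIL(&example_list, ld, ld_chain);	/* mutate the list */
	lockdebug_unlock(&example_lk);			/* unlock + splx */
}
#endif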

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	if (id == LD_NOID)
		return NULL;

	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);

	base = ld_table[id >> LD_BATCH_SHIFT];
	ld = base + (id & LD_BATCH_MASK);

	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;

	for (i = 0; i < LD_MLOCKS; i++)
		__cpu_simple_lock_init(&ld_mem_lk[i].lk_lock);
	for (i = 0; i < LD_MLISTS; i++)
		TAILQ_INIT(&ld_mem_list[i]);
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
#endif
	struct cpu_info *ci;
	lockdebug_t *ld;

	if (lo == NULL || panicstr != NULL)
		return LD_NOID;
	if (ld_freeptr == 0)
		lockdebug_init();

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return LD_NOID;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return LD_NOID;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

#if 0
	/* Insert into address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_INSERT_HEAD(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif

	return ld->ld_id;
}
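
/*
 * Illustrative sketch (editor's addition): how a lock primitive is expected
 * to obtain a lockdebug ID at initialization time and keep it for later
 * calls.  In the real kernel the callers are the lock primitives
 * themselves; "example_lock", "example_lockops" and example_lock_init()
 * below are invented for illustration only.
 */
#if 0
struct example_lock {
	__cpu_simple_lock_t	el_lock;
	u_int			el_debug_id;	/* may be LD_NOID */
};

static lockops_t example_lockops = {
	.lo_name = "example_lock",
	.lo_sleeplock = 0,		/* spin lock semantics */
	.lo_dump = NULL,
};

static void
example_lock_init(struct example_lock *el)
{

	__cpu_simple_lock_init(&el->el_lock);
	/* Record the initialization site; LD_NOID simply disables checks. */
	el->el_debug_id = lockdebug_alloc(el, &example_lockops,
	    (uintptr_t)__builtin_return_address(0));
}
#endif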

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock, u_int id)
{
#if 0
	lockdebuglist_t *head;
#endif
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (ld->ld_lock != lock) {
		panic("lockdebug_free: destroying uninitialized lock %p"
		    "(ld_id=%d ld_lock=%p)", lock, id, ld->ld_lock);
		lockdebug_abort1(ld, lk, __func__, "lock record follows",
		    true);
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);

#if 0
	/* Remove from address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_REMOVE(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			ld->ld_id = i;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

#ifdef notyet
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}
#endif

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}
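
/*
 * Illustrative sketch (editor's addition): the intended call order on the
 * acquire side is lockdebug_wantlock() before spinning or sleeping, then
 * lockdebug_locked() once the lock is actually owned.  This reuses the
 * hypothetical "example_lock" from the earlier sketch.
 */
#if 0
static void
example_lock_acquire(struct example_lock *el)
{
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	/* Preamble: catches "locking against myself" and records demand. */
	lockdebug_wantlock(el->el_debug_id, where, 0);

	__cpu_simple_lock(&el->el_lock);	/* take the real lock */

	/* Postamble: record the owner, CPU and acquisition address. */
	lockdebug_locked(el->el_debug_id, where, 0);
}
#endif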

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}
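
/*
 * Illustrative sketch (editor's addition): on the release side the record
 * is updated with lockdebug_unlocked() while the lock is still held, and
 * only then is the real lock dropped.  Again based on the hypothetical
 * "example_lock" from the earlier sketches.
 */
#if 0
static void
example_lock_release(struct example_lock *el)
{

	/* Verify ownership and record the release address. */
	lockdebug_unlocked(el->el_debug_id,
	    (uintptr_t)__builtin_return_address(0), 0);

	__cpu_simple_unlock(&el->el_lock);	/* drop the real lock */
}
#endif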

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified and,
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.  We only check for active locks within the
 *	first page of the allocation.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	uintptr_t sa, ea, la;

	sa = (uintptr_t)base;
	ea = sa + sz;

	lockdebug_mhash(base, &lk, &head);
	TAILQ_FOREACH(ld, head, ld_mchain) {
		la = (uintptr_t)ld->ld_lock;
		if (la >= sa && la < ea) {
			lockdebug_abort1(ld, lk, func,
			    "allocation contains active lock", !cold);
			return;
		}
	}
	lockdebug_unlock(lk);
#endif
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */
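
/*
 * Illustrative sketch (editor's addition): lockdebug_barrier() is meant to
 * be called from code that is about to block or switch away, to assert
 * that no spin locks other than an optional interlock, and no sleep locks,
 * are still held.  example_prepare_to_block() is hypothetical.
 */
#if 0
static void
example_prepare_to_block(volatile void *interlock)
{

	/* Holding "interlock" is fine; any other lock triggers an abort. */
	lockdebug_barrier(interlock, 0);

	/* ... enqueue on a sleep queue and switch away ... */
}
#endif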

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
		const char *func, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}
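
/*
 * Illustrative sketch (editor's addition): lockdebug_abort() is the entry
 * point a lock primitive uses when one of its own consistency checks
 * fails.  The hypothetical example_lock_error() below, built on the
 * "example_lock" sketches earlier in this file, shows the shape of such a
 * call.
 */
#if 0
static void
example_lock_error(struct example_lock *el, const char *func)
{

	/* Dump the record for this lock (if any) and panic. */
	lockdebug_abort(el->el_debug_id, el, &example_lockops, func,
	    "assertion failed");
}
#endif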