/*	$NetBSD: subr_lockdebug.c,v 1.10 2007/10/11 19:45:25 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.10 2007/10/11 19:45:25 ad Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02
#define	LD_MLOCKS	8
#define	LD_MLISTS	8192

#define	LD_NOID		(LD_MAX_LOCKS + 1)

typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_mchain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	u_int		ld_id;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;
lockdebuglk_t	ld_mem_lk[LD_MLOCKS];

lockdebuglist_t	ld_mem_list[LD_MLISTS];
lockdebuglist_t	ld_sleepers;
lockdebuglist_t	ld_spinners;
lockdebuglist_t	ld_free;
lockdebuglist_t	ld_all;
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
			const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&lk->lk_lock);
	splx(s);
}

static inline void
lockdebug_mhash(volatile void *addr, lockdebuglk_t **lk, lockdebuglist_t **head)
{
	u_int hash;

	hash = (uintptr_t)addr >> PGSHIFT;
	*lk = &ld_mem_lk[hash & (LD_MLOCKS - 1)];
	*head = &ld_mem_list[hash & (LD_MLISTS - 1)];
	lockdebug_lock(*lk);
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	if (id == LD_NOID)
		return NULL;

	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);

	base = ld_table[id >> LD_BATCH_SHIFT];
	ld = base + (id & LD_BATCH_MASK);

	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	TAILQ_INIT(&ld_free);
	TAILQ_INIT(&ld_all);
	TAILQ_INIT(&ld_sleepers);
	TAILQ_INIT(&ld_spinners);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;

	for (i = 0; i < LD_MLOCKS; i++)
		__cpu_simple_lock_init(&ld_mem_lk[i].lk_lock);
	for (i = 0; i < LD_MLISTS; i++)
		TAILQ_INIT(&ld_mem_list[i]);
}

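/*
 * Illustrative note: a lock ID is simply an index into the two-level
 * table built by lockdebug_init() and extended by lockdebug_more().
 * With LD_BATCH_SHIFT == 9, for example, ID 1300 resolves to
 *
 *	ld_table[1300 >> LD_BATCH_SHIFT] + (1300 & LD_BATCH_MASK)
 *	    == ld_table[2] + 276
 *
 * which is exactly the computation lockdebug_lookup() performs above.
 * Slot 0 of ld_prime is never placed on the free list, so ID 0 always
 * denotes an uninitialized lock.
 */
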
/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
#endif
	struct cpu_info *ci;
	lockdebug_t *ld;

	if (lo == NULL || panicstr != NULL)
		return LD_NOID;
	if (ld_freeptr == 0)
		lockdebug_init();

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return LD_NOID;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return LD_NOID;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialize the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

#if 0
	/* Insert into address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_INSERT_HEAD(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif

	return ld->ld_id;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock, u_int id)
{
#if 0
	lockdebuglist_t *head;
#endif
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (ld->ld_lock != lock) {
		panic("lockdebug_free: destroying uninitialized lock %p"
		    " (ld_id=%d ld_lock=%p)", lock, id, ld->ld_lock);
		lockdebug_abort1(ld, lk, __func__, "lock record follows",
		    true);
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);

#if 0
	/* Remove from address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_REMOVE(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			ld->ld_id = i;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}

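/*
 * Illustrative sketch only (never compiled): how a hypothetical lock
 * primitive might register with lockdebug_alloc() and deregister with
 * lockdebug_free().  "struct foolock", its fields and foolock_lockops
 * are invented for illustration; only lo_name, lo_sleeplock and lo_dump
 * are real lockops_t members used by this file.
 */
#if 0
struct foolock {
	volatile unsigned int	fl_lock;	/* hypothetical lock word */
	u_int			fl_dodebug;	/* lockdebug ID, or LD_NOID */
};

static lockops_t foolock_lockops = {
	.lo_name = "foolock",		/* printed by lockdebug_abort1() */
	.lo_sleeplock = 0,		/* 0: track on the spinner list */
	.lo_dump = NULL,		/* optional dump hook */
};

void
foolock_init(struct foolock *fl)
{

	fl->fl_lock = 0;
	/*
	 * LD_NOID is a valid result here: lockdebug_lookup() returns
	 * NULL for it, so the other hooks quietly become no-ops.
	 */
	fl->fl_dodebug = lockdebug_alloc(fl, &foolock_lockops,
	    (uintptr_t)__builtin_return_address(0));
}

void
foolock_destroy(struct foolock *fl)
{

	lockdebug_free(fl, fl->fl_dodebug);
}
#endif
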
/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

#ifdef notyet
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}
#endif

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

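/*
 * Illustrative sketch only (never compiled): the expected ordering of the
 * three hooks above on a hypothetical exclusive acquire/release path.
 * foolock_spin_acquire()/foolock_spin_release() stand in for whatever the
 * primitive really does; the hypothetical foolock comes from the sketch
 * following lockdebug_more() above.
 */
#if 0
void
foolock_acquire(struct foolock *fl)
{
	uintptr_t ra = (uintptr_t)__builtin_return_address(0);

	/* Announce intent first: this is what catches recursion. */
	lockdebug_wantlock(fl->fl_dodebug, ra, 0);

	foolock_spin_acquire(fl);		/* take the real lock */

	/* Record the new owner (CPU, LWP, PC) once the lock is held. */
	lockdebug_locked(fl->fl_dodebug, ra, 0);
}

void
foolock_release(struct foolock *fl)
{

	/* Verify and clear ownership before the real release. */
	lockdebug_unlocked(fl->fl_dodebug,
	    (uintptr_t)__builtin_return_address(0), 0);

	foolock_spin_release(fl);		/* drop the real lock */
}
#endif
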
/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified
 *	(if any), and optionally if we hold sleep or shared locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_flag & LW_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}

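/*
 * Illustrative sketch only (never compiled): a typical lockdebug_barrier()
 * caller asserts that nothing unexpected is held before an operation that
 * may block or switch away.  The function name and interlock below are
 * hypothetical.
 */
#if 0
void
foo_prepare_to_block(volatile void *interlock)
{

	/*
	 * Only `interlock' may be held among spin locks; slplocks = 1
	 * says that holding sleep locks is acceptable at this point.
	 */
	lockdebug_barrier(interlock, 1);
}
#endif
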
/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.  We only check for active locks within the
 *	first page of the allocation.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	uintptr_t sa, ea, la;

	sa = (uintptr_t)base;
	ea = sa + sz;

	lockdebug_mhash(base, &lk, &head);
	TAILQ_FOREACH(ld, head, ld_mchain) {
		la = (uintptr_t)ld->ld_lock;
		if (la >= sa && la < ea) {
			lockdebug_abort1(ld, lk, func,
			    "allocation contains active lock", !cold);
			return;
		}
	}
	lockdebug_unlock(lk);
#endif
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	Dump information about a known lock and abort.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
		const char *func, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}
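
/*
 * Illustrative sketch only (never compiled): how a hypothetical primitive's
 * assertion-failure path might feed lockdebug_abort().  foolock_abort() is
 * invented; the lockops_t it passes is the one registered with
 * lockdebug_alloc() in the earlier sketch.
 */
#if 0
static void
foolock_abort(struct foolock *fl, const char *func, const char *msg)
{

	/* Dumps the lock record when one exists, then panics. */
	lockdebug_abort(fl->fl_dodebug, fl, &foolock_lockops, func, msg);
}
#endif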