/*	$NetBSD: subr_lockdebug.c,v 1.29 2008/03/27 18:30:15 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
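/*
 * Overview: every lock that is tracked has an associated lockdebug_t
 * record.  Records are handed out from the ld_free list (replenished in
 * batches by lockdebug_more()), indexed by lock address in a red-black
 * tree (ld_rb_tree), and moved onto the ld_sleepers or ld_spinners list
 * while the lock is held exclusively.  The tree and lists are protected
 * by the simple spin locks (lockdebuglk_t) declared below.
 */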

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.29 2008/03/27 18:30:15 ad Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>

unsigned int	ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef union lockdebuglk {
        struct {
                u_int	lku_lock;
                int	lku_oldspl;
        } ul;
        uint8_t	lk_pad[COHERENCY_UNIT];
} volatile __aligned(COHERENCY_UNIT) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
        struct rb_node	ld_rb_node;	/* must be the first member */
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
        volatile void	*ld_lock;
        lockops_t	*ld_lockops;
        struct lwp	*ld_lwp;
        uintptr_t	ld_locked;
        uintptr_t	ld_unlocked;
        uintptr_t	ld_initaddr;
        uint16_t	ld_shares;
        uint16_t	ld_cpu;
        uint8_t		ld_flags;
        uint8_t		ld_shwant;	/* advisory */
        uint8_t		ld_exwant;	/* advisory */
        uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_tree_lk;
lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t	ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t	ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t	ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
			const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
        const lockdebug_t *ld1 = (const void *)n1;
        const lockdebug_t *ld2 = (const void *)n2;
        const uintptr_t a = (uintptr_t)ld1->ld_lock;
        const uintptr_t b = (uintptr_t)ld2->ld_lock;

        if (a < b)
                return 1;
        if (a > b)
                return -1;
        return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
        const lockdebug_t *ld = (const void *)n;
        const uintptr_t a = (uintptr_t)ld->ld_lock;
        const uintptr_t b = (uintptr_t)key;

        if (a < b)
                return 1;
        if (a > b)
                return -1;
        return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
        .rb_compare_nodes = ld_rb_compare_nodes,
        .rb_compare_key = ld_rb_compare_key,
};

static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

        lk->lk_lock = 0;
}
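/*
 * lockdebug_lock / lockdebug_unlock:
 *
 *      Acquire/release a lockdebuglk_t exclusively.  The lock word is
 *      claimed by installing LD_WRITE_LOCK with a compare-and-swap at
 *      raised SPL; lockdebug_lock_rd()/lockdebug_unlock_rd() below take
 *      the same word as readers by incrementing it while no writer is
 *      present.
 */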
static void
lockdebug_lock(lockdebuglk_t *lk)
{
        int s;

        s = splhigh();
        do {
                while (lk->lk_lock != 0) {
                        SPINLOCK_SPIN_HOOK;
                }
        } while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
        lk->lk_oldspl = s;
        membar_enter();
}

static void
lockdebug_unlock(lockdebuglk_t *lk)
{
        int s;

        s = lk->lk_oldspl;
        membar_exit();
        lk->lk_lock = 0;
        splx(s);
}

static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
        u_int val;
        int s;

        s = splhigh();
        do {
                while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
                        SPINLOCK_SPIN_HOOK;
                }
        } while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
        membar_enter();
        return s;
}

static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

        membar_exit();
        atomic_dec_uint(&lk->lk_lock);
        splx(s);
}

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
        lockdebug_t *ld;
        int s;

        s = lockdebug_lock_rd(&ld_tree_lk);
        ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
        lockdebug_unlock_rd(&ld_tree_lk, s);
        if (ld == NULL)
                return NULL;

        if ((ld->ld_flags & LD_SLEEPER) != 0)
                *lk = &ld_sleeper_lk;
        else
                *lk = &ld_spinner_lk;

        lockdebug_lock(*lk);
        return ld;
}

/*
 * lockdebug_lookup:
 *
 *      Find a lockdebug structure by a pointer to a lock and return it
 *      locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
        lockdebug_t *ld;

        ld = lockdebug_lookup1(lock, lk);
        if (ld == NULL)
                panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
        return ld;
}

/*
 * lockdebug_init:
 *
 *      Initialize the lockdebug system.  Allocate an initial pool of
 *      lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
        lockdebug_t *ld;
        int i;

        lockdebug_lock_init(&ld_tree_lk);
        lockdebug_lock_init(&ld_sleeper_lk);
        lockdebug_lock_init(&ld_spinner_lk);
        lockdebug_lock_init(&ld_free_lk);

        rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

        ld = ld_prime;
        ld_table[0] = ld;
        for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
                TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
        }
        ld_freeptr = 1;
        ld_nfree = LD_BATCH - 1;
}
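/*
 * Typical call sequence (sketch only): a lock primitive drives the hooks
 * below roughly as follows, normally via the LOCKDEBUG_* wrapper macros
 * in <sys/lockdebug.h>:
 *
 *      lockdebug_alloc(lock, ops, return_address);  when the lock is created
 *      lockdebug_wantlock(lock, where, shared);     before acquiring
 *      lockdebug_locked(lock, where, shared);       once the lock is owned
 *      lockdebug_unlocked(lock, where, shared);     on release
 *      lockdebug_free(lock);                        when the lock is destroyed
 */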
/*
 * lockdebug_alloc:
 *
 *      A lock is being initialized, so allocate an associated debug
 *      structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
        struct cpu_info *ci;
        lockdebug_t *ld;
        lockdebuglk_t *lk;

        if (lo == NULL || panicstr != NULL || ld_panic)
                return false;
        if (ld_freeptr == 0)
                lockdebug_init();

        if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
                lockdebug_abort1(ld, lk, __func__, "already initialized", true);
                return false;
        }

        /*
         * Pinch a new debug structure.  We may recurse because we call
         * kmem_alloc(), which may need to initialize new locks somewhere
         * down the path.  If not recursing, we try to maintain at least
         * LD_SLOP structures free, which should hopefully be enough to
         * satisfy kmem_alloc().  If we can't provide a structure, not to
         * worry: we'll just mark the lock as not having an ID.
         */
        lockdebug_lock(&ld_free_lk);
        ci = curcpu();
        ci->ci_lkdebug_recurse++;

        if (TAILQ_EMPTY(&ld_free)) {
                if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
                        ci->ci_lkdebug_recurse--;
                        lockdebug_unlock(&ld_free_lk);
                        return false;
                }
                lockdebug_more();
        } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
                lockdebug_more();

        if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
                lockdebug_unlock(&ld_free_lk);
                return false;
        }

        TAILQ_REMOVE(&ld_free, ld, ld_chain);
        ld_nfree--;

        ci->ci_lkdebug_recurse--;
        lockdebug_unlock(&ld_free_lk);

        if (ld->ld_lock != NULL)
                panic("lockdebug_alloc: corrupt table");

        if (lo->lo_sleeplock)
                lockdebug_lock(&ld_sleeper_lk);
        else
                lockdebug_lock(&ld_spinner_lk);

        /* Initialise the structure. */
        ld->ld_lock = lock;
        ld->ld_lockops = lo;
        ld->ld_locked = 0;
        ld->ld_unlocked = 0;
        ld->ld_lwp = NULL;
        ld->ld_initaddr = initaddr;

        lockdebug_lock(&ld_tree_lk);
        rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
        lockdebug_unlock(&ld_tree_lk);

        if (lo->lo_sleeplock) {
                ld->ld_flags = LD_SLEEPER;
                lockdebug_unlock(&ld_sleeper_lk);
        } else {
                ld->ld_flags = 0;
                lockdebug_unlock(&ld_spinner_lk);
        }

        return true;
}

/*
 * lockdebug_free:
 *
 *      A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
        lockdebug_t *ld;
        lockdebuglk_t *lk;

        if (panicstr != NULL || ld_panic)
                return;

        ld = lockdebug_lookup(lock, &lk);
        if (ld == NULL) {
                panic("lockdebug_free: destroying uninitialized lock %p",
                    lock);
                lockdebug_abort1(ld, lk, __func__, "lock record follows",
                    true);
                return;
        }
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
                lockdebug_abort1(ld, lk, __func__, "is locked", true);
                return;
        }
        lockdebug_lock(&ld_tree_lk);
        rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
        lockdebug_unlock(&ld_tree_lk);
        ld->ld_lock = NULL;
        lockdebug_unlock(lk);

        lockdebug_lock(&ld_free_lk);
        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
        ld_nfree++;
        lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *      Allocate a batch of debug structures and add to the free list.
 *      Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
        lockdebug_t *ld;
        void *block;
        int i, base, m;

        while (ld_nfree < LD_SLOP) {
                lockdebug_unlock(&ld_free_lk);
                block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
                lockdebug_lock(&ld_free_lk);

                if (block == NULL)
                        return;

                if (ld_nfree > LD_SLOP) {
                        /* Somebody beat us to it. */
                        lockdebug_unlock(&ld_free_lk);
                        kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
                        lockdebug_lock(&ld_free_lk);
                        continue;
                }

                base = ld_freeptr;
                ld_nfree += LD_BATCH;
                ld = block;
                base <<= LD_BATCH_SHIFT;
                m = min(LD_MAX_LOCKS, base + LD_BATCH);

                if (m == LD_MAX_LOCKS)
                        ld_nomore = true;

                for (i = base; i < m; i++, ld++) {
                        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                        TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
                }

                membar_producer();
                ld_table[ld_freeptr++] = block;
        }
}
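/*
 * Sizing note: each batch added by lockdebug_more() covers LD_BATCH
 * (1 << 9 == 512) structures, so batch ld_freeptr spans indices
 * ld_freeptr * 512 .. ld_freeptr * 512 + 511.  At most
 * LD_MAX_LOCKS / LD_BATCH == 2048 batches are ever created; ld_nomore
 * is set once that ceiling is reached and no further batches are
 * allocated.
 */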
/*
 * lockdebug_wantlock:
 *
 *      Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
        lockdebug_t *ld;
        bool recurse;

        (void)shared;
        recurse = false;

        if (panicstr != NULL || ld_panic)
                return;

        if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;

        if ((ld->ld_flags & LD_LOCKED) != 0) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp == l)
                                recurse = true;
                } else if (ld->ld_cpu == (uint16_t)cpu_number())
                        recurse = true;
        }

        if (cpu_intr_p()) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        lockdebug_abort1(ld, lk, __func__,
                            "acquiring sleep lock from interrupt context",
                            true);
                        return;
                }
        }

        if (shared)
                ld->ld_shwant++;
        else
                ld->ld_exwant++;

        if (recurse) {
                lockdebug_abort1(ld, lk, __func__, "locking against myself",
                    true);
                return;
        }

        lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *      Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
        lockdebug_t *ld;

        if (panicstr != NULL || ld_panic)
                return;

        if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;

        if (shared) {
                l->l_shlocks++;
                ld->ld_shares++;
                ld->ld_shwant--;
        } else {
                if ((ld->ld_flags & LD_LOCKED) != 0) {
                        lockdebug_abort1(ld, lk, __func__,
                            "already locked", true);
                        return;
                }

                ld->ld_flags |= LD_LOCKED;
                ld->ld_locked = where;
                ld->ld_cpu = (uint16_t)cpu_number();
                ld->ld_lwp = l;
                ld->ld_exwant--;

                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        l->l_exlocks++;
                        TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
                } else {
                        curcpu()->ci_spin_locks2++;
                        TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
                }
        }

        lockdebug_unlock(lk);
}
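/*
 * Bookkeeping note: lockdebug_locked() and lockdebug_unlocked() maintain
 * the per-LWP counters l_shlocks/l_exlocks and the per-CPU counter
 * ci_spin_locks2.  lockdebug_barrier() below consults these counters to
 * decide whether the sleeper/spinner lists need to be walked at all.
 */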
/*
 * lockdebug_unlocked:
 *
 *      Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
        lockdebug_t *ld;

        if (panicstr != NULL || ld_panic)
                return;

        if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;

        if (shared) {
                if (l->l_shlocks == 0) {
                        lockdebug_abort1(ld, lk, __func__,
                            "no shared locks held by LWP", true);
                        return;
                }
                if (ld->ld_shares == 0) {
                        lockdebug_abort1(ld, lk, __func__,
                            "no shared holds on this lock", true);
                        return;
                }
                l->l_shlocks--;
                ld->ld_shares--;
        } else {
                if ((ld->ld_flags & LD_LOCKED) == 0) {
                        lockdebug_abort1(ld, lk, __func__, "not locked",
                            true);
                        return;
                }

                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp != curlwp) {
                                lockdebug_abort1(ld, lk, __func__,
                                    "not held by current LWP", true);
                                return;
                        }
                        ld->ld_flags &= ~LD_LOCKED;
                        ld->ld_unlocked = where;
                        ld->ld_lwp = NULL;
                        curlwp->l_exlocks--;
                        TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
                } else {
                        if (ld->ld_cpu != (uint16_t)cpu_number()) {
                                lockdebug_abort1(ld, lk, __func__,
                                    "not held by current CPU", true);
                                return;
                        }
                        ld->ld_flags &= ~LD_LOCKED;
                        ld->ld_unlocked = where;
                        ld->ld_lwp = NULL;
                        curcpu()->ci_spin_locks2--;
                        TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
                }
        }

        lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *      Panic if we hold any spin lock other than the one specified, and
 *      optionally if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        uint16_t cpuno;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        crit_enter();

        if (curcpu()->ci_spin_locks2 != 0) {
                cpuno = (uint16_t)cpu_number();

                s = lockdebug_lock_rd(&ld_spinner_lk);
                TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
                        if (ld->ld_lock == spinlock) {
                                if (ld->ld_cpu != cpuno) {
                                        lockdebug_abort1(ld, &ld_spinner_lk,
                                            __func__,
                                            "not held by current CPU", true);
                                        return;
                                }
                                continue;
                        }
                        if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0) {
                                lockdebug_abort1(ld, &ld_spinner_lk,
                                    __func__, "spin lock held", true);
                                return;
                        }
                }
                lockdebug_unlock_rd(&ld_spinner_lk, s);
        }

        if (!slplocks) {
                if (l->l_exlocks != 0) {
                        s = lockdebug_lock_rd(&ld_sleeper_lk);
                        TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
                                if (ld->ld_lwp == l) {
                                        lockdebug_abort1(ld, &ld_sleeper_lk,
                                            __func__, "sleep lock held", true);
                                        return;
                                }
                        }
                        lockdebug_unlock_rd(&ld_sleeper_lk, s);
                }
                if (l->l_shlocks != 0)
                        panic("lockdebug_barrier: holding %d shared locks",
                            l->l_shlocks);
        }

        crit_exit();
}
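/*
 * Usage sketch (assumed call sites, not defined in this file): callers
 * such as the context switch and sleep paths typically invoke the check
 * above through the LOCKDEBUG_BARRIER() macro, passing the one spin lock
 * they may still legitimately hold (or NULL) and a flag saying whether
 * sleep locks are acceptable, e.g.:
 *
 *      LOCKDEBUG_BARRIER(&kernel_lock, 1);
 */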
/*
 * lockdebug_mem_check:
 *
 *      Check for in-use locks within a memory region that is
 *      being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
        lockdebug_t *ld;
        lockdebuglk_t *lk;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        s = lockdebug_lock_rd(&ld_tree_lk);
        ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
        if (ld != NULL) {
                const uintptr_t lock = (uintptr_t)ld->ld_lock;

                if ((uintptr_t)base > lock)
                        panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
                            __func__, ld, base, sz);
                if (lock >= (uintptr_t)base + sz)
                        ld = NULL;
        }
        lockdebug_unlock_rd(&ld_tree_lk, s);
        if (ld == NULL)
                return;

        if ((ld->ld_flags & LD_SLEEPER) != 0)
                lk = &ld_sleeper_lk;
        else
                lk = &ld_spinner_lk;

        lockdebug_lock(lk);
        lockdebug_abort1(ld, lk, func,
            "allocation contains active lock", !cold);
}

/*
 * lockdebug_dump:
 *
 *      Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
        int sleeper = (ld->ld_flags & LD_SLEEPER);

        (*pr)(
            "lock address : %#018lx type     : %18s\n"
            "shared holds : %18u exclusive: %18u\n"
            "shares wanted: %18u exclusive: %18u\n"
            "current cpu  : %18u last held: %18u\n"
            "current lwp  : %#018lx last held: %#018lx\n"
            "last locked  : %#018lx unlocked : %#018lx\n"
            "initialized  : %#018lx\n",
            (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
            (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
            (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
            (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
            (long)curlwp, (long)ld->ld_lwp,
            (long)ld->ld_locked, (long)ld->ld_unlocked,
            (long)ld->ld_initaddr);

        if (ld->ld_lockops->lo_dump != NULL)
                (*ld->ld_lockops->lo_dump)(ld->ld_lock);

        if (sleeper) {
                (*pr)("\n");
                turnstile_print(ld->ld_lock, pr);
        }
}

/*
 * lockdebug_abort1:
 *
 *      An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

        /*
         * Don't make the situation worse if the system is already going
         * down in flames.  Once a panic is triggered, lockdebug state
         * becomes stale and cannot be trusted.
         */
        if (atomic_inc_uint_nv(&ld_panic) != 1) {
                lockdebug_unlock(lk);
                return;
        }

        printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
            func, msg);
        lockdebug_dump(ld, printf_nolog);
        lockdebug_unlock(lk);
        printf_nolog("\n");
        if (dopanic)
                panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *      Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;

        TAILQ_FOREACH(ld, &ld_all, ld_achain) {
                if (ld->ld_lock == addr) {
                        lockdebug_dump(ld, pr);
                        return;
                }
        }
        (*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
        (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */
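/*
 * From the in-kernel debugger, the dump above is reached with the
 * 'show lock' command, giving the address of the lock of interest
 * (address shown is illustrative):
 *
 *      db> show lock 0xffffffff80e2c500
 */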
/*
 * lockdebug_abort:
 *
 *      An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;
        lockdebuglk_t *lk;

        if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
                lockdebug_abort1(ld, lk, func, msg, true);
                /* NOTREACHED */
        }
#endif	/* LOCKDEBUG */

        /*
         * Complain only on the first occurrence.  Otherwise proceed to
         * panic where we will `rendezvous' with other CPUs if the machine
         * is going down in flames.
         */
        if (atomic_inc_uint_nv(&ld_panic) == 1) {
                printf_nolog("%s error: %s: %s\n\n"
                    "lock address : %#018lx\n"
                    "current cpu  : %18d\n"
                    "current lwp  : %#018lx\n",
                    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
                    (long)curlwp);
                (*ops->lo_dump)(lock);
                printf_nolog("\n");
        }

        panic("lock error");
}
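/*
 * Usage sketch (hypothetical wrapper, not defined in this file): a lock
 * primitive reports a fatal inconsistency by calling lockdebug_abort()
 * with its own lockops_t, for example:
 *
 *      static void
 *      foo_lock_abort(volatile void *lock, const char *func, const char *msg)
 *      {
 *
 *              lockdebug_abort(lock, &foo_lockops, func, msg);
 *      }
 */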