/*	$NetBSD: subr_lockdebug.c,v 1.75 2020/03/09 01:47:50 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.75 2020/03/09 01:47:50 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>
#include <sys/ksyms.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#ifdef __ia64__
#define	LD_BATCH_SHIFT	16
#else
#define	LD_BATCH_SHIFT	9
#endif
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
#ifdef _KERNEL
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
#else
extern lockdebuglist_t	ld_all;
#define	cpu_name(a)	"?"
#define	cpu_index(a)	-1
#define	curlwp		NULL
#endif /* _KERNEL */
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

#ifdef _KERNEL
static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
		    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lwp_t *, lockdebug_t *,
		    void (*)(const char *, ...)
		    __printflike(1, 2));

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (__predict_false(ld == NULL)) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}
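
/*
 * Usage sketch (illustrative, not part of the original file): every caller
 * of lockdebug_lookup() in this file follows the same pattern.  The lookup
 * runs at splhigh() and returns the lockdebug_t with ld_spinlock held, so
 * the caller drops both when it is finished:
 *
 *	int s = splhigh();
 *	lockdebug_t *ld = lockdebug_lookup(func, line, lock, where);
 *	if (ld != NULL) {
 *		... examine or update *ld ...
 *		__cpu_simple_unlock(&ld->ld_spinlock);
 *	}
 *	splx(s);
 */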

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
		return false;
	if (__predict_false(ld_freeptr == 0))
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (__predict_false(ld->ld_lock != NULL)) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
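
/*
 * Usage sketch (illustrative, not part of the original file): the expected
 * call sequence over the lifetime of a tracked lock, using only functions
 * defined in this file.  A lock primitive registers itself at init time,
 * reports acquires and releases, and deregisters on destruction:
 *
 *	lockdebug_alloc(__func__, __LINE__, lock, lo, initaddr);
 *	...
 *	lockdebug_wantlock(__func__, __LINE__, lock, where, 0);
 *	lockdebug_locked(__func__, __LINE__, lock, NULL, where, 0);
 *	...
 *	lockdebug_unlocked(__func__, __LINE__, lock, where, 0);
 *	...
 *	lockdebug_free(__func__, __LINE__, lock);
 *
 * In-tree callers normally reach these through the LOCKDEBUG_* wrapper
 * macros in <sys/lockdebug.h> rather than calling them directly.
 */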

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0));
	if (__predict_false(ld == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("%s,%zu: destroying uninitialized object %p "
		    "(ld_lock=%p)", func, line, lock, ld->ld_lock);
		return;
	}
	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
	    ld->ld_shares != 0)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "is locked or in use",
		    true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || cpu_softintr_p()) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = uimin(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}
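
/*
 * Worked example (illustrative, not part of the original file): with the
 * default LD_BATCH_SHIFT of 9, LD_BATCH is 512.  lockdebug_init() primes
 * the allocator with the static ld_prime[] array (slot 0 is skipped, so
 * ld_nfree starts at 511), and each pass through the loop above adds
 * another 512 structures from kmem_zalloc() whenever fewer than LD_SLOP
 * (16) remain free.  Once base + LD_BATCH reaches LD_MAX_LOCKS (1048576),
 * ld_nomore is set and lockdebug_alloc() simply leaves new locks untracked.
 */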

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.  The "shared"
 *	parameter controls which ld_{ex,sh}want counter is
 *	updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared > 0)
		ld->ld_shwant++;
	else if (shared == 0)
		ld->ld_exwant++;
	if (__predict_false(recurse)) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	if (l->l_ld_wanted == NULL) {
		l->l_ld_wanted = ld;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (__predict_false(cvlock != (void *)ld->ld_locked)) {
			lockdebug_abort1(func, line, ld, s,
			    "multiple locks used with condition variable",
			    true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	if (l->l_ld_wanted == ld) {
		l->l_ld_wanted = NULL;
	}
	splx(s);
}
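
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * hypothetical reader/writer caller would drive the "shared" argument.
 * A read acquire passes shared > 0 (bumping ld_shwant, then ld_shares),
 * a write acquire passes 0 (bumping ld_exwant, then setting LD_LOCKED),
 * and a caller that only wants the recursion/interrupt checks in
 * lockdebug_wantlock() passes a negative value, which updates neither
 * advisory counter:
 *
 *	lockdebug_wantlock(__func__, __LINE__, lock, where, 1);
 *	lockdebug_locked(__func__, __LINE__, lock, NULL, where, 1);
 *
 *	lockdebug_wantlock(__func__, __LINE__, lock, where, 0);
 *	lockdebug_locked(__func__, __LINE__, lock, NULL, where, 0);
 */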

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (__predict_false(l->l_shlocks == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (__predict_false(ld->ld_shares == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (__predict_false(ld->ld_lwp != curlwp)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			uint16_t idx = (uint16_t)cpu_index(curcpu());
			if (__predict_false(ld->ld_cpu != idx)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic || lock == (void *)&lbolt))
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (__predict_false(ld->ld_shares != 0 &&
	    !mutex_owned((kmutex_t *)ld->ld_locked))) {
		lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
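
/*
 * Illustrative note (not part of the original file): for condition
 * variables the same fields are reused.  The CV code is expected to report
 * each waiter via lockdebug_locked() with its interlock passed as "cvlock",
 * so ld_shares counts the current waiters and ld_locked records the
 * interlock; lockdebug_wakeup() then checks that the caller still holds
 * that recorded mutex, e.g.:
 *
 *	lockdebug_locked(__func__, __LINE__, cv, mtx, where, 0);
 *	...
 *	lockdebug_wakeup(__func__, __LINE__, cv, where);
 *
 * Here "cv" and "mtx" stand for the condition variable and its interlock.
 */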

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold more than one specified lock, and optionally, if we
 *	hold any sleep locks.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == onelock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	ld = TAILQ_FIRST(&l->l_ld_locks);
	if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lock == onelock) {
				continue;
			}
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(l, ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if (__predict_false((uintptr_t)base > lock))
			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
			    func, line, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (__predict_false(ld != NULL)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
#endif	/* _KERNEL */

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#endif

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);
	lockops_t *lo = ld->ld_lockops;

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

#ifndef _KERNEL
	lockops_t los;
	lo = &los;
	db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
#endif
	if (lo->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "relevant cpu : %18u last held: %18u\n"
		    "relevant lwp : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
		    (long)l, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

#ifdef _KERNEL
	if (lo->lo_dump != NULL)
		(*lo->lo_dump)(ld->ld_lock, pr);

	if (sleeper) {
		turnstile_print(ld->ld_lock, pr);
	}
#endif
}

#ifdef _KERNEL
/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(curlwp, ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}

#endif	/* _KERNEL */
#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld, lds;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
		ld = &lds;
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(curlwp, ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}

#ifdef _KERNEL
#ifdef LOCKDEBUG
static void
lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	const char *sym;

#ifdef _KERNEL
	ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
	    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
#endif
	(*pr)("* Lock %d (initialized at %s)\n", i++, sym);
	lockdebug_dump(l, ld, pr);
}

static void
lockdebug_show_trace(const void *ptr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
}

static void
lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	struct proc *p;

	LIST_FOREACH(p, &allproc, p_list) {
		struct lwp *l;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lockdebug_t *ld;
			int i = 0;
			if (TAILQ_EMPTY(&l->l_ld_locks) &&
			    l->l_ld_wanted == NULL) {
				continue;
			}
			(*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
			    p->p_pid, l->l_lid,
			    l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
			if (!TAILQ_EMPTY(&l->l_ld_locks)) {
				(*pr)("\n*** Locks held: \n");
				TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
					(*pr)("\n");
					lockdebug_show_one(l, ld, i++, pr);
				}
			} else {
				(*pr)("\n*** Locks held: none\n");
			}

			if (l->l_ld_wanted != NULL) {
				(*pr)("\n*** Locks wanted: \n\n");
				lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
			} else {
				(*pr)("\n*** Locks wanted: none\n");
			}
			if (show_trace) {
				(*pr)("\n*** Traceback: \n\n");
				lockdebug_show_trace(l, pr);
				(*pr)("\n");
			}
		}
	}
}

static void
lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	lockdebug_t *ld;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		int i = 0;
		if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
			continue;
		(*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
		TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
			(*pr)("\n");
#ifdef MULTIPROCESSOR
			lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
			if (show_trace)
				lockdebug_show_trace(ci->ci_curlwp, pr);
#else
			lockdebug_show_one(curlwp, ld, i++, pr);
			if (show_trace)
				lockdebug_show_trace(curlwp, pr);
#endif
		}
	}
}
#endif	/* LOCKDEBUG */
#endif	/* _KERNEL */

#ifdef _KERNEL
void
lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
    const char *modif)
{
#ifdef LOCKDEBUG
	bool show_trace = false;
	if (modif[0] == 't')
		show_trace = true;

	(*pr)("[Locks tracked through LWPs]\n");
	lockdebug_show_all_locks_lwp(pr, show_trace);
	(*pr)("\n");

	(*pr)("[Locks tracked through CPUs]\n");
	lockdebug_show_all_locks_cpu(pr, show_trace);
	(*pr)("\n");
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}

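/*
 * Illustrative note (not part of the original file): lockdebug_show_all_locks()
 * backs the DDB "show all locks" command, and the 't' modifier requests a
 * stack trace for each holder, e.g. from the debugger prompt:
 *
 *	db> show all locks/t
 *	db> show lock <address>
 *
 * The second form is handled by lockdebug_lock_print() above; the command
 * wiring itself lives in the DDB command tables, not in this file, and the
 * syntax is documented in ddb(4).
 */
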
void
lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	void *_ld;
	uint32_t n_null = 0;
	uint32_t n_spin_mutex = 0;
	uint32_t n_adaptive_mutex = 0;
	uint32_t n_rwlock = 0;
	uint32_t n_cv = 0;
	uint32_t n_others = 0;

	RB_TREE_FOREACH(_ld, &ld_rb_tree) {
		ld = _ld;
		if (ld->ld_lock == NULL) {
			n_null++;
			continue;
		}
		if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
			n_cv++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'M') {
			if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
				n_adaptive_mutex++;
			else
				n_spin_mutex++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'R') {
			n_rwlock++;
			continue;
		}
		n_others++;
	}
	(*pr)(
	    "condvar: %u\n"
	    "spin mutex: %u\n"
	    "adaptive mutex: %u\n"
	    "rwlock: %u\n"
	    "null locks: %u\n"
	    "others: %u\n",
	    n_cv, n_spin_mutex, n_adaptive_mutex, n_rwlock,
	    n_null, n_others);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif /* _KERNEL */
#endif /* DDB */

#ifdef _KERNEL
/*
 * lockdebug_dismiss:
 *
 *	The system is rebooting, and potentially from an unsafe
 *	place so avoid any future aborts.
 */
void
lockdebug_dismiss(void)
{

	atomic_inc_uint_nv(&ld_panic);
}

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) > 1)
		return;

	printf_nolog("%s error: %s,%zu: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, line, msg, (long)lock,
	    (int)cpu_index(curcpu()), (long)curlwp);
	(*ops->lo_dump)(lock, printf_nolog);
	printf_nolog("\n");

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}
#endif /* _KERNEL */