1 /* $NetBSD: kern_lock.c,v 1.59 2001/09/29 21:27:49 chs Exp $ */ 2 3 /*- 4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * This code is derived from software contributed to The NetBSD Foundation 12 * by Ross Harvey. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. All advertising materials mentioning features or use of this software 23 * must display the following acknowledgement: 24 * This product includes software developed by the NetBSD 25 * Foundation, Inc. and its contributors. 26 * 4. Neither the name of The NetBSD Foundation nor the names of its 27 * contributors may be used to endorse or promote products derived 28 * from this software without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 32 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 33 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 34 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 38 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 40 * POSSIBILITY OF SUCH DAMAGE. 41 */ 42 43 /* 44 * Copyright (c) 1995 45 * The Regents of the University of California. All rights reserved. 46 * 47 * This code contains ideas from software contributed to Berkeley by 48 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating 49 * System project at Carnegie-Mellon University. 50 * 51 * Redistribution and use in source and binary forms, with or without 52 * modification, are permitted provided that the following conditions 53 * are met: 54 * 1. Redistributions of source code must retain the above copyright 55 * notice, this list of conditions and the following disclaimer. 56 * 2. Redistributions in binary form must reproduce the above copyright 57 * notice, this list of conditions and the following disclaimer in the 58 * documentation and/or other materials provided with the distribution. 59 * 3. All advertising materials mentioning features or use of this software 60 * must display the following acknowledgement: 61 * This product includes software developed by the University of 62 * California, Berkeley and its contributors. 63 * 4. Neither the name of the University nor the names of its contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 
66 * 67 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 71 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 72 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 73 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 74 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 75 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 76 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 77 * SUCH DAMAGE. 78 * 79 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95 80 */ 81 82 #include "opt_multiprocessor.h" 83 #include "opt_lockdebug.h" 84 #include "opt_ddb.h" 85 86 #include <sys/param.h> 87 #include <sys/proc.h> 88 #include <sys/lock.h> 89 #include <sys/systm.h> 90 #include <machine/cpu.h> 91 92 #if defined(LOCKDEBUG) 93 #include <sys/syslog.h> 94 /* 95 * note that stdarg.h and the ansi style va_start macro is used for both 96 * ansi and traditional c compiles. 97 * XXX: this requires that stdarg.h define: va_alist and va_dcl 98 */ 99 #include <machine/stdarg.h> 100 101 void lock_printf(const char *fmt, ...) 102 __attribute__((__format__(__printf__,1,2))); 103 104 int lock_debug_syslog = 0; /* defaults to printf, but can be patched */ 105 106 #ifdef DDB 107 #include <ddb/ddbvar.h> 108 #include <machine/db_machdep.h> 109 #include <ddb/db_command.h> 110 #include <ddb/db_interface.h> 111 #endif 112 #endif 113 114 /* 115 * Locking primitives implementation. 116 * Locks provide shared/exclusive synchronization. 
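 *
 * A minimal usage sketch for a hypothetical caller (the lock name,
 * priority and flags below are illustrative only, not taken from this
 * file):
 *
 *	struct lock sc_lock;
 *
 *	lockinit(&sc_lock, PRIBIO, "sclock", 0, 0);
 *
 *	(void) lockmgr(&sc_lock, LK_SHARED, NULL);	read-only access
 *	...
 *	(void) lockmgr(&sc_lock, LK_RELEASE, NULL);
 *
 *	(void) lockmgr(&sc_lock, LK_EXCLUSIVE, NULL);	modify the object
 *	...
 *	(void) lockmgr(&sc_lock, LK_RELEASE, NULL);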
117 */ 118 119 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */ 120 #if defined(MULTIPROCESSOR) /* { */ 121 #define COUNT_CPU(cpu_id, x) \ 122 curcpu()->ci_spin_locks += (x) 123 #else 124 u_long spin_locks; 125 #define COUNT_CPU(cpu_id, x) spin_locks += (x) 126 #endif /* MULTIPROCESSOR */ /* } */ 127 128 #define COUNT(lkp, p, cpu_id, x) \ 129 do { \ 130 if ((lkp)->lk_flags & LK_SPIN) \ 131 COUNT_CPU((cpu_id), (x)); \ 132 else \ 133 (p)->p_locks += (x); \ 134 } while (/*CONSTCOND*/0) 135 #else 136 #define COUNT(lkp, p, cpu_id, x) 137 #define COUNT_CPU(cpu_id, x) 138 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */ 139 140 #ifndef SPINLOCK_SPIN_HOOK /* from <machine/lock.h> */ 141 #define SPINLOCK_SPIN_HOOK /* nothing */ 142 #endif 143 144 #define INTERLOCK_ACQUIRE(lkp, flags, s) \ 145 do { \ 146 if ((flags) & LK_SPIN) \ 147 s = splsched(); \ 148 simple_lock(&(lkp)->lk_interlock); \ 149 } while (0) 150 151 #define INTERLOCK_RELEASE(lkp, flags, s) \ 152 do { \ 153 simple_unlock(&(lkp)->lk_interlock); \ 154 if ((flags) & LK_SPIN) \ 155 splx(s); \ 156 } while (0) 157 158 #if defined(LOCKDEBUG) 159 #if defined(DDB) 160 #define SPINLOCK_SPINCHECK_DEBUGGER Debugger() 161 #else 162 #define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */ 163 #endif 164 165 #define SPINLOCK_SPINCHECK_DECL \ 166 /* 32-bits of count -- wrap constitutes a "spinout" */ \ 167 uint32_t __spinc = 0 168 169 #define SPINLOCK_SPINCHECK \ 170 do { \ 171 if (++__spinc == 0) { \ 172 printf("LK_SPIN spinout, excl %d, share %d\n", \ 173 lkp->lk_exclusivecount, lkp->lk_sharecount); \ 174 if (lkp->lk_exclusivecount) \ 175 printf("held by CPU %lu\n", \ 176 (u_long) lkp->lk_cpu); \ 177 if (lkp->lk_lock_file) \ 178 printf("last locked at %s:%d\n", \ 179 lkp->lk_lock_file, lkp->lk_lock_line); \ 180 if (lkp->lk_unlock_file) \ 181 printf("last unlocked at %s:%d\n", \ 182 lkp->lk_unlock_file, lkp->lk_unlock_line); \ 183 SPINLOCK_SPINCHECK_DEBUGGER; \ 184 } \ 185 } while (0) 186 #else 187 #define SPINLOCK_SPINCHECK_DECL /* nothing */ 188 #define SPINLOCK_SPINCHECK /* nothing */ 189 #endif /* LOCKDEBUG && DDB */ 190 191 /* 192 * Acquire a resource. 193 */ 194 #define ACQUIRE(lkp, error, extflags, drain, wanted) \ 195 if ((extflags) & LK_SPIN) { \ 196 int interlocked; \ 197 SPINLOCK_SPINCHECK_DECL; \ 198 \ 199 if ((drain) == 0) \ 200 (lkp)->lk_waitcount++; \ 201 for (interlocked = 1;;) { \ 202 SPINLOCK_SPINCHECK; \ 203 if (wanted) { \ 204 if (interlocked) { \ 205 INTERLOCK_RELEASE((lkp), \ 206 LK_SPIN, s); \ 207 interlocked = 0; \ 208 } \ 209 SPINLOCK_SPIN_HOOK; \ 210 } else if (interlocked) { \ 211 break; \ 212 } else { \ 213 INTERLOCK_ACQUIRE((lkp), LK_SPIN, s); \ 214 interlocked = 1; \ 215 } \ 216 } \ 217 if ((drain) == 0) \ 218 (lkp)->lk_waitcount--; \ 219 KASSERT((wanted) == 0); \ 220 error = 0; /* sanity */ \ 221 } else { \ 222 for (error = 0; wanted; ) { \ 223 if ((drain)) \ 224 (lkp)->lk_flags |= LK_WAITDRAIN; \ 225 else \ 226 (lkp)->lk_waitcount++; \ 227 /* XXX Cast away volatile. */ \ 228 error = ltsleep((drain) ? 
\ 229 (void *)&(lkp)->lk_flags : \ 230 (void *)(lkp), (lkp)->lk_prio, \ 231 (lkp)->lk_wmesg, (lkp)->lk_timo, \ 232 &(lkp)->lk_interlock); \ 233 if ((drain) == 0) \ 234 (lkp)->lk_waitcount--; \ 235 if (error) \ 236 break; \ 237 if ((extflags) & LK_SLEEPFAIL) { \ 238 error = ENOLCK; \ 239 break; \ 240 } \ 241 } \ 242 } 243 244 #define SETHOLDER(lkp, pid, cpu_id) \ 245 do { \ 246 if ((lkp)->lk_flags & LK_SPIN) \ 247 (lkp)->lk_cpu = cpu_id; \ 248 else \ 249 (lkp)->lk_lockholder = pid; \ 250 } while (/*CONSTCOND*/0) 251 252 #define WEHOLDIT(lkp, pid, cpu_id) \ 253 (((lkp)->lk_flags & LK_SPIN) != 0 ? \ 254 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid))) 255 256 #define WAKEUP_WAITER(lkp) \ 257 do { \ 258 if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) { \ 259 /* XXX Cast away volatile. */ \ 260 wakeup((void *)(lkp)); \ 261 } \ 262 } while (/*CONSTCOND*/0) 263 264 #if defined(LOCKDEBUG) /* { */ 265 #if defined(MULTIPROCESSOR) /* { */ 266 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER; 267 268 #define SPINLOCK_LIST_LOCK() \ 269 __cpu_simple_lock(&spinlock_list_slock.lock_data) 270 271 #define SPINLOCK_LIST_UNLOCK() \ 272 __cpu_simple_unlock(&spinlock_list_slock.lock_data) 273 #else 274 #define SPINLOCK_LIST_LOCK() /* nothing */ 275 276 #define SPINLOCK_LIST_UNLOCK() /* nothing */ 277 #endif /* MULTIPROCESSOR */ /* } */ 278 279 TAILQ_HEAD(, lock) spinlock_list = 280 TAILQ_HEAD_INITIALIZER(spinlock_list); 281 282 #define HAVEIT(lkp) \ 283 do { \ 284 if ((lkp)->lk_flags & LK_SPIN) { \ 285 int s = spllock(); \ 286 SPINLOCK_LIST_LOCK(); \ 287 /* XXX Cast away volatile. */ \ 288 TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \ 289 lk_list); \ 290 SPINLOCK_LIST_UNLOCK(); \ 291 splx(s); \ 292 } \ 293 } while (/*CONSTCOND*/0) 294 295 #define DONTHAVEIT(lkp) \ 296 do { \ 297 if ((lkp)->lk_flags & LK_SPIN) { \ 298 int s = spllock(); \ 299 SPINLOCK_LIST_LOCK(); \ 300 /* XXX Cast away volatile. */ \ 301 TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \ 302 lk_list); \ 303 SPINLOCK_LIST_UNLOCK(); \ 304 splx(s); \ 305 } \ 306 } while (/*CONSTCOND*/0) 307 #else 308 #define HAVEIT(lkp) /* nothing */ 309 310 #define DONTHAVEIT(lkp) /* nothing */ 311 #endif /* LOCKDEBUG */ /* } */ 312 313 #if defined(LOCKDEBUG) 314 /* 315 * Lock debug printing routine; can be configured to print to console 316 * or log to syslog. 317 */ 318 void 319 lock_printf(const char *fmt, ...) 320 { 321 va_list ap; 322 323 va_start(ap, fmt); 324 if (lock_debug_syslog) 325 vlog(LOG_DEBUG, fmt, ap); 326 else 327 vprintf(fmt, ap); 328 va_end(ap); 329 } 330 #endif /* LOCKDEBUG */ 331 332 /* 333 * Initialize a lock; required before use. 334 */ 335 void 336 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags) 337 { 338 339 memset(lkp, 0, sizeof(struct lock)); 340 simple_lock_init(&lkp->lk_interlock); 341 lkp->lk_flags = flags & LK_EXTFLG_MASK; 342 if (flags & LK_SPIN) 343 lkp->lk_cpu = LK_NOCPU; 344 else { 345 lkp->lk_lockholder = LK_NOPROC; 346 lkp->lk_prio = prio; 347 lkp->lk_timo = timo; 348 } 349 lkp->lk_wmesg = wmesg; /* just a name for spin locks */ 350 #if defined(LOCKDEBUG) 351 lkp->lk_lock_file = NULL; 352 lkp->lk_unlock_file = NULL; 353 #endif 354 } 355 356 /* 357 * Determine the status of a lock. 
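 * Returns LK_EXCLUSIVE if the lock is held exclusively, LK_SHARED if it
 * is held shared, and 0 if it is not held at all.  A hypothetical caller
 * might use it defensively, e.g.
 * KASSERT(lockstatus(&sc_lock) == LK_EXCLUSIVE) before touching data
 * protected by sc_lock.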
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
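 *
 * The request types handled below, sketched for a hypothetical caller
 * (`lkp' is a struct lock * already set up with lockinit()):
 *
 *	lockmgr(lkp, LK_SHARED, NULL);		add one shared hold
 *	lockmgr(lkp, LK_UPGRADE, NULL);		shared -> exclusive
 *	lockmgr(lkp, LK_DOWNGRADE, NULL);	exclusive -> shared
 *	lockmgr(lkp, LK_RELEASE, NULL);		drop one hold
 *	lockmgr(lkp, LK_DRAIN, NULL);		wait out all holders/waiters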
448 */ 449 int 450 #if defined(LOCKDEBUG) 451 _lockmgr(__volatile struct lock *lkp, u_int flags, 452 struct simplelock *interlkp, const char *file, int line) 453 #else 454 lockmgr(__volatile struct lock *lkp, u_int flags, 455 struct simplelock *interlkp) 456 #endif 457 { 458 int error; 459 pid_t pid; 460 int extflags; 461 cpuid_t cpu_id; 462 struct proc *p = curproc; 463 int lock_shutdown_noblock = 0; 464 int s; 465 466 error = 0; 467 468 INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s); 469 if (flags & LK_INTERLOCK) 470 simple_unlock(interlkp); 471 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; 472 473 #ifdef DIAGNOSTIC /* { */ 474 /* 475 * Don't allow spins on sleep locks and don't allow sleeps 476 * on spin locks. 477 */ 478 if ((flags ^ lkp->lk_flags) & LK_SPIN) 479 panic("lockmgr: sleep/spin mismatch\n"); 480 #endif /* } */ 481 482 if (extflags & LK_SPIN) 483 pid = LK_KERNPROC; 484 else { 485 if (p == NULL) { 486 if (!doing_shutdown) { 487 #ifdef DIAGNOSTIC 488 panic("lockmgr: no context"); 489 #endif 490 } else { 491 p = &proc0; 492 if (panicstr && (!(flags & LK_NOWAIT))) { 493 flags |= LK_NOWAIT; 494 lock_shutdown_noblock = 1; 495 } 496 } 497 } 498 pid = p->p_pid; 499 } 500 cpu_id = cpu_number(); 501 502 /* 503 * Once a lock has drained, the LK_DRAINING flag is set and an 504 * exclusive lock is returned. The only valid operation thereafter 505 * is a single release of that exclusive lock. This final release 506 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any 507 * further requests of any sort will result in a panic. The bits 508 * selected for these two flags are chosen so that they will be set 509 * in memory that is freed (freed memory is filled with 0xdeadbeef). 510 * The final release is permitted to give a new lease on life to 511 * the lock by specifying LK_REENABLE. 512 */ 513 if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) { 514 #ifdef DIAGNOSTIC /* { */ 515 if (lkp->lk_flags & LK_DRAINED) 516 panic("lockmgr: using decommissioned lock"); 517 if ((flags & LK_TYPE_MASK) != LK_RELEASE || 518 WEHOLDIT(lkp, pid, cpu_id) == 0) 519 panic("lockmgr: non-release on draining lock: %d\n", 520 flags & LK_TYPE_MASK); 521 #endif /* DIAGNOSTIC */ /* } */ 522 lkp->lk_flags &= ~LK_DRAINING; 523 if ((flags & LK_REENABLE) == 0) 524 lkp->lk_flags |= LK_DRAINED; 525 } 526 527 switch (flags & LK_TYPE_MASK) { 528 529 case LK_SHARED: 530 if (WEHOLDIT(lkp, pid, cpu_id) == 0) { 531 /* 532 * If just polling, check to see if we will block. 533 */ 534 if ((extflags & LK_NOWAIT) && (lkp->lk_flags & 535 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) { 536 error = EBUSY; 537 break; 538 } 539 /* 540 * Wait for exclusive locks and upgrades to clear. 541 */ 542 ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags & 543 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)); 544 if (error) 545 break; 546 lkp->lk_sharecount++; 547 COUNT(lkp, p, cpu_id, 1); 548 break; 549 } 550 /* 551 * We hold an exclusive lock, so downgrade it to shared. 552 * An alternative would be to fail with EDEADLK. 
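		 * Here we just bump the share count and fall through into
		 * the LK_DOWNGRADE case below, which converts the remaining
		 * exclusive hold(s) into shared holds and wakes any waiters.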
553 */ 554 lkp->lk_sharecount++; 555 COUNT(lkp, p, cpu_id, 1); 556 /* fall into downgrade */ 557 558 case LK_DOWNGRADE: 559 if (WEHOLDIT(lkp, pid, cpu_id) == 0 || 560 lkp->lk_exclusivecount == 0) 561 panic("lockmgr: not holding exclusive lock"); 562 lkp->lk_sharecount += lkp->lk_exclusivecount; 563 lkp->lk_exclusivecount = 0; 564 lkp->lk_recurselevel = 0; 565 lkp->lk_flags &= ~LK_HAVE_EXCL; 566 SETHOLDER(lkp, LK_NOPROC, LK_NOCPU); 567 #if defined(LOCKDEBUG) 568 lkp->lk_unlock_file = file; 569 lkp->lk_unlock_line = line; 570 #endif 571 DONTHAVEIT(lkp); 572 WAKEUP_WAITER(lkp); 573 break; 574 575 case LK_EXCLUPGRADE: 576 /* 577 * If another process is ahead of us to get an upgrade, 578 * then we want to fail rather than have an intervening 579 * exclusive access. 580 */ 581 if (lkp->lk_flags & LK_WANT_UPGRADE) { 582 lkp->lk_sharecount--; 583 COUNT(lkp, p, cpu_id, -1); 584 error = EBUSY; 585 break; 586 } 587 /* fall into normal upgrade */ 588 589 case LK_UPGRADE: 590 /* 591 * Upgrade a shared lock to an exclusive one. If another 592 * shared lock has already requested an upgrade to an 593 * exclusive lock, our shared lock is released and an 594 * exclusive lock is requested (which will be granted 595 * after the upgrade). If we return an error, the file 596 * will always be unlocked. 597 */ 598 if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0) 599 panic("lockmgr: upgrade exclusive lock"); 600 lkp->lk_sharecount--; 601 COUNT(lkp, p, cpu_id, -1); 602 /* 603 * If we are just polling, check to see if we will block. 604 */ 605 if ((extflags & LK_NOWAIT) && 606 ((lkp->lk_flags & LK_WANT_UPGRADE) || 607 lkp->lk_sharecount > 1)) { 608 error = EBUSY; 609 break; 610 } 611 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) { 612 /* 613 * We are first shared lock to request an upgrade, so 614 * request upgrade and wait for the shared count to 615 * drop to zero, then take exclusive lock. 616 */ 617 lkp->lk_flags |= LK_WANT_UPGRADE; 618 ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount); 619 lkp->lk_flags &= ~LK_WANT_UPGRADE; 620 if (error) 621 break; 622 lkp->lk_flags |= LK_HAVE_EXCL; 623 SETHOLDER(lkp, pid, cpu_id); 624 #if defined(LOCKDEBUG) 625 lkp->lk_lock_file = file; 626 lkp->lk_lock_line = line; 627 #endif 628 HAVEIT(lkp); 629 if (lkp->lk_exclusivecount != 0) 630 panic("lockmgr: non-zero exclusive count"); 631 lkp->lk_exclusivecount = 1; 632 if (extflags & LK_SETRECURSE) 633 lkp->lk_recurselevel = 1; 634 COUNT(lkp, p, cpu_id, 1); 635 break; 636 } 637 /* 638 * Someone else has requested upgrade. Release our shared 639 * lock, awaken upgrade requestor if we are the last shared 640 * lock, then request an exclusive lock. 641 */ 642 if (lkp->lk_sharecount == 0) 643 WAKEUP_WAITER(lkp); 644 /* fall into exclusive request */ 645 646 case LK_EXCLUSIVE: 647 if (WEHOLDIT(lkp, pid, cpu_id)) { 648 /* 649 * Recursive lock. 650 */ 651 if ((extflags & LK_CANRECURSE) == 0 && 652 lkp->lk_recurselevel == 0) { 653 if (extflags & LK_RECURSEFAIL) { 654 error = EDEADLK; 655 break; 656 } else 657 panic("lockmgr: locking against myself"); 658 } 659 lkp->lk_exclusivecount++; 660 if (extflags & LK_SETRECURSE && 661 lkp->lk_recurselevel == 0) 662 lkp->lk_recurselevel = lkp->lk_exclusivecount; 663 COUNT(lkp, p, cpu_id, 1); 664 break; 665 } 666 /* 667 * If we are just polling, check to see if we will sleep. 
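		 * (LK_NOWAIT means the caller prefers an immediate EBUSY
		 * over sleeping here until the current holders go away.)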
668 */ 669 if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & 670 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 671 lkp->lk_sharecount != 0)) { 672 error = EBUSY; 673 break; 674 } 675 /* 676 * Try to acquire the want_exclusive flag. 677 */ 678 ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags & 679 (LK_HAVE_EXCL | LK_WANT_EXCL)); 680 if (error) 681 break; 682 lkp->lk_flags |= LK_WANT_EXCL; 683 /* 684 * Wait for shared locks and upgrades to finish. 685 */ 686 ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 || 687 (lkp->lk_flags & LK_WANT_UPGRADE)); 688 lkp->lk_flags &= ~LK_WANT_EXCL; 689 if (error) 690 break; 691 lkp->lk_flags |= LK_HAVE_EXCL; 692 SETHOLDER(lkp, pid, cpu_id); 693 #if defined(LOCKDEBUG) 694 lkp->lk_lock_file = file; 695 lkp->lk_lock_line = line; 696 #endif 697 HAVEIT(lkp); 698 if (lkp->lk_exclusivecount != 0) 699 panic("lockmgr: non-zero exclusive count"); 700 lkp->lk_exclusivecount = 1; 701 if (extflags & LK_SETRECURSE) 702 lkp->lk_recurselevel = 1; 703 COUNT(lkp, p, cpu_id, 1); 704 break; 705 706 case LK_RELEASE: 707 if (lkp->lk_exclusivecount != 0) { 708 if (WEHOLDIT(lkp, pid, cpu_id) == 0) { 709 if (lkp->lk_flags & LK_SPIN) { 710 panic("lockmgr: processor %lu, not " 711 "exclusive lock holder %lu " 712 "unlocking", cpu_id, lkp->lk_cpu); 713 } else { 714 panic("lockmgr: pid %d, not " 715 "exclusive lock holder %d " 716 "unlocking", pid, 717 lkp->lk_lockholder); 718 } 719 } 720 if (lkp->lk_exclusivecount == lkp->lk_recurselevel) 721 lkp->lk_recurselevel = 0; 722 lkp->lk_exclusivecount--; 723 COUNT(lkp, p, cpu_id, -1); 724 if (lkp->lk_exclusivecount == 0) { 725 lkp->lk_flags &= ~LK_HAVE_EXCL; 726 SETHOLDER(lkp, LK_NOPROC, LK_NOCPU); 727 #if defined(LOCKDEBUG) 728 lkp->lk_unlock_file = file; 729 lkp->lk_unlock_line = line; 730 #endif 731 DONTHAVEIT(lkp); 732 } 733 } else if (lkp->lk_sharecount != 0) { 734 lkp->lk_sharecount--; 735 COUNT(lkp, p, cpu_id, -1); 736 } 737 #ifdef DIAGNOSTIC 738 else 739 panic("lockmgr: release of unlocked lock!"); 740 #endif 741 WAKEUP_WAITER(lkp); 742 break; 743 744 case LK_DRAIN: 745 /* 746 * Check that we do not already hold the lock, as it can 747 * never drain if we do. Unfortunately, we have no way to 748 * check for holding a shared lock, but at least we can 749 * check for an exclusive one. 750 */ 751 if (WEHOLDIT(lkp, pid, cpu_id)) 752 panic("lockmgr: draining against myself"); 753 /* 754 * If we are just polling, check to see if we will sleep. 
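		 * (Unlike a plain exclusive request, a drain must also fail
		 * if anyone is merely waiting, hence the lk_waitcount test.)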
 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock released via spinlock_release_all(), acquire
 * the lock again N (count) times on behalf of the current CPU.
 * Intended for use in mi_switch() right after resuming execution.
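 *
 * A sketch of the intended pairing around a context switch (`lkp' is a
 * hypothetical spin lock of type struct lock *):
 *
 *	count = spinlock_release_all(lkp);
 *	... block, switch to and from other processes ...
 *	spinlock_acquire_count(lkp, count);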
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds "
		    "lock\n", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("spinlock_acquire_count: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do
{ \ 990 lock_printf("\n"); \ 991 lock_printf(str); \ 992 lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \ 993 SLOCK_MP(); \ 994 if ((alp)->lock_file != NULL) \ 995 lock_printf("last locked: %s:%d\n", (alp)->lock_file, \ 996 (alp)->lock_line); \ 997 if ((alp)->unlock_file != NULL) \ 998 lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \ 999 (alp)->unlock_line); \ 1000 SLOCK_TRACE() \ 1001 SLOCK_DEBUGGER(); \ 1002 } while (/*CONSTCOND*/0) 1003 1004 /* 1005 * Simple lock functions so that the debugger can see from whence 1006 * they are being called. 1007 */ 1008 void 1009 simple_lock_init(struct simplelock *alp) 1010 { 1011 1012 #if defined(MULTIPROCESSOR) /* { */ 1013 __cpu_simple_lock_init(&alp->lock_data); 1014 #else 1015 alp->lock_data = __SIMPLELOCK_UNLOCKED; 1016 #endif /* } */ 1017 alp->lock_file = NULL; 1018 alp->lock_line = 0; 1019 alp->unlock_file = NULL; 1020 alp->unlock_line = 0; 1021 alp->lock_holder = LK_NOCPU; 1022 } 1023 1024 void 1025 _simple_lock(__volatile struct simplelock *alp, const char *id, int l) 1026 { 1027 cpuid_t cpu_id = cpu_number(); 1028 int s; 1029 1030 s = spllock(); 1031 1032 /* 1033 * MULTIPROCESSOR case: This is `safe' since if it's not us, we 1034 * don't take any action, and just fall into the normal spin case. 1035 */ 1036 if (alp->lock_data == __SIMPLELOCK_LOCKED) { 1037 #if defined(MULTIPROCESSOR) /* { */ 1038 if (alp->lock_holder == cpu_id) { 1039 SLOCK_WHERE("simple_lock: locking against myself\n", 1040 alp, id, l); 1041 goto out; 1042 } 1043 #else 1044 SLOCK_WHERE("simple_lock: lock held\n", alp, id, l); 1045 goto out; 1046 #endif /* MULTIPROCESSOR */ /* } */ 1047 } 1048 1049 #if defined(MULTIPROCESSOR) /* { */ 1050 /* Acquire the lock before modifying any fields. */ 1051 __cpu_simple_lock(&alp->lock_data); 1052 #else 1053 alp->lock_data = __SIMPLELOCK_LOCKED; 1054 #endif /* } */ 1055 1056 if (alp->lock_holder != LK_NOCPU) { 1057 SLOCK_WHERE("simple_lock: uninitialized lock\n", 1058 alp, id, l); 1059 } 1060 alp->lock_file = id; 1061 alp->lock_line = l; 1062 alp->lock_holder = cpu_id; 1063 1064 SLOCK_LIST_LOCK(); 1065 /* XXX Cast away volatile */ 1066 TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list); 1067 SLOCK_LIST_UNLOCK(); 1068 1069 SLOCK_COUNT(1); 1070 1071 out: 1072 splx(s); 1073 } 1074 1075 int 1076 _simple_lock_held(__volatile struct simplelock *alp) 1077 { 1078 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC) 1079 cpuid_t cpu_id = cpu_number(); 1080 #endif 1081 int s, locked = 0; 1082 1083 s = spllock(); 1084 1085 #if defined(MULTIPROCESSOR) 1086 if (__cpu_simple_lock_try(&alp->lock_data) == 0) 1087 locked = (alp->lock_holder == cpu_id); 1088 else 1089 __cpu_simple_unlock(&alp->lock_data); 1090 #else 1091 if (alp->lock_data == __SIMPLELOCK_LOCKED) { 1092 locked = 1; 1093 KASSERT(alp->lock_holder == cpu_id); 1094 } 1095 #endif 1096 1097 splx(s); 1098 1099 return (locked); 1100 } 1101 1102 int 1103 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l) 1104 { 1105 cpuid_t cpu_id = cpu_number(); 1106 int s, rv = 0; 1107 1108 s = spllock(); 1109 1110 /* 1111 * MULTIPROCESSOR case: This is `safe' since if it's not us, we 1112 * don't take any action. 
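	 * (If the try fails and lock_holder is this CPU, that is a
	 * recursive acquisition and gets reported; if another CPU holds
	 * the lock, a failed try is the normal, expected outcome.)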
1113 */ 1114 #if defined(MULTIPROCESSOR) /* { */ 1115 if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) { 1116 if (alp->lock_holder == cpu_id) 1117 SLOCK_WHERE("simple_lock_try: locking against myself\n", 1118 alp, id, l); 1119 goto out; 1120 } 1121 #else 1122 if (alp->lock_data == __SIMPLELOCK_LOCKED) { 1123 SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l); 1124 goto out; 1125 } 1126 alp->lock_data = __SIMPLELOCK_LOCKED; 1127 #endif /* MULTIPROCESSOR */ /* } */ 1128 1129 /* 1130 * At this point, we have acquired the lock. 1131 */ 1132 1133 rv = 1; 1134 1135 alp->lock_file = id; 1136 alp->lock_line = l; 1137 alp->lock_holder = cpu_id; 1138 1139 SLOCK_LIST_LOCK(); 1140 /* XXX Cast away volatile. */ 1141 TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list); 1142 SLOCK_LIST_UNLOCK(); 1143 1144 SLOCK_COUNT(1); 1145 1146 out: 1147 splx(s); 1148 return (rv); 1149 } 1150 1151 void 1152 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l) 1153 { 1154 int s; 1155 1156 s = spllock(); 1157 1158 /* 1159 * MULTIPROCESSOR case: This is `safe' because we think we hold 1160 * the lock, and if we don't, we don't take any action. 1161 */ 1162 if (alp->lock_data == __SIMPLELOCK_UNLOCKED) { 1163 SLOCK_WHERE("simple_unlock: lock not held\n", 1164 alp, id, l); 1165 goto out; 1166 } 1167 1168 SLOCK_LIST_LOCK(); 1169 TAILQ_REMOVE(&simplelock_list, alp, list); 1170 SLOCK_LIST_UNLOCK(); 1171 1172 SLOCK_COUNT(-1); 1173 1174 alp->list.tqe_next = NULL; /* sanity */ 1175 alp->list.tqe_prev = NULL; /* sanity */ 1176 1177 alp->unlock_file = id; 1178 alp->unlock_line = l; 1179 1180 #if defined(MULTIPROCESSOR) /* { */ 1181 alp->lock_holder = LK_NOCPU; 1182 /* Now that we've modified all fields, release the lock. */ 1183 __cpu_simple_unlock(&alp->lock_data); 1184 #else 1185 alp->lock_data = __SIMPLELOCK_UNLOCKED; 1186 KASSERT(alp->lock_holder == cpu_number()); 1187 alp->lock_holder = LK_NOCPU; 1188 #endif /* } */ 1189 1190 out: 1191 splx(s); 1192 } 1193 1194 void 1195 simple_lock_dump(void) 1196 { 1197 struct simplelock *alp; 1198 int s; 1199 1200 s = spllock(); 1201 SLOCK_LIST_LOCK(); 1202 lock_printf("all simple locks:\n"); 1203 TAILQ_FOREACH(alp, &simplelock_list, list) { 1204 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder, 1205 alp->lock_file, alp->lock_line); 1206 } 1207 SLOCK_LIST_UNLOCK(); 1208 splx(s); 1209 } 1210 1211 void 1212 simple_lock_freecheck(void *start, void *end) 1213 { 1214 struct simplelock *alp; 1215 int s; 1216 1217 s = spllock(); 1218 SLOCK_LIST_LOCK(); 1219 TAILQ_FOREACH(alp, &simplelock_list, list) { 1220 if ((void *)alp >= start && (void *)alp < end) { 1221 lock_printf("freeing simple_lock %p CPU %lu %s:%d\n", 1222 alp, alp->lock_holder, alp->lock_file, 1223 alp->lock_line); 1224 SLOCK_DEBUGGER(); 1225 } 1226 } 1227 SLOCK_LIST_UNLOCK(); 1228 splx(s); 1229 } 1230 1231 /* 1232 * We must be holding exactly one lock: the sched_lock. 
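 * (That is, when mi_switch() calls simple_lock_switchcheck(), the only
 * simple lock this CPU may still hold is sched_lock itself; anything
 * else would be leaked across the context switch.)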
1233 */ 1234 1235 void 1236 simple_lock_switchcheck(void) 1237 { 1238 1239 simple_lock_only_held(&sched_lock, "switching"); 1240 } 1241 1242 void 1243 simple_lock_only_held(volatile struct simplelock *lp, const char *where) 1244 { 1245 struct simplelock *alp; 1246 cpuid_t cpu_id = cpu_number(); 1247 int s; 1248 1249 if (lp) { 1250 LOCK_ASSERT(simple_lock_held(lp)); 1251 } 1252 s = spllock(); 1253 SLOCK_LIST_LOCK(); 1254 TAILQ_FOREACH(alp, &simplelock_list, list) { 1255 if (alp == lp) 1256 continue; 1257 if (alp->lock_holder == cpu_id) 1258 break; 1259 } 1260 SLOCK_LIST_UNLOCK(); 1261 splx(s); 1262 1263 if (alp != NULL) { 1264 lock_printf("\n%s with held simple_lock %p " 1265 "CPU %lu %s:%d\n", 1266 where, alp, alp->lock_holder, alp->lock_file, 1267 alp->lock_line); 1268 SLOCK_TRACE(); 1269 SLOCK_DEBUGGER(); 1270 } 1271 } 1272 #endif /* LOCKDEBUG */ /* } */ 1273
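#if 0
/*
 * Illustrative sketch only: how a hypothetical driver might use the
 * simple_lock interface instrumented above.  With LOCKDEBUG, the
 * simple_lock()/simple_unlock() macros in <sys/lock.h> expand to the
 * _simple_lock()/_simple_unlock() functions defined in this file,
 * passing __FILE__ and __LINE__ so that SLOCK_WHERE() can report where
 * a misuse happened.  The structure and function names below are
 * made up for the example.
 */
struct example_softc {
	struct simplelock sc_slock;
	int sc_count;
};

static void
example_attach(struct example_softc *sc)
{

	/* A simple lock must be initialized before first use. */
	simple_lock_init(&sc->sc_slock);
	sc->sc_count = 0;
}

static void
example_bump(struct example_softc *sc)
{

	/* Hold the lock only around the shared data it protects. */
	simple_lock(&sc->sc_slock);
	sc->sc_count++;
	simple_unlock(&sc->sc_slock);
}
#endif /* 0 */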