/*	$NetBSD: kern_lock.c,v 1.75 2004/02/13 11:36:22 wiz Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.75 2004/02/13 11:36:22 wiz Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

static int acquire(__volatile struct lock *, int *, int, int, int);

int lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define COUNT_CPU(cpu_id, x) \
	curcpu()->ci_spin_locks += (x)
#else
u_long spin_locks;
#define COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define COUNT(lkp, l, cpu_id, x) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) \
		COUNT_CPU((cpu_id), (x)); \
	else \
		(l)->l_locks += (x); \
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define INTERLOCK_ACQUIRE(lkp, flags, s) \
do { \
	if ((flags) & LK_SPIN) \
		s = spllock(); \
	simple_lock(&(lkp)->lk_interlock); \
} while (/*CONSTCOND*/ 0)

#define INTERLOCK_RELEASE(lkp, flags, s) \
do { \
	simple_unlock(&(lkp)->lk_interlock); \
	if ((flags) & LK_SPIN) \
		splx(s); \
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define SLOCK_TRACE() \
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
	    TRUE, 65535, "", lock_printf);
#else
#define SLOCK_DEBUGGER()	/* nothing */
#define SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define SPINLOCK_SPINCHECK_DECL \
	/* 32-bits of count -- wrap constitutes a "spinout" */ \
	uint32_t __spinc = 0

#define SPINLOCK_SPINCHECK \
do { \
	if (++__spinc == 0) { \
		lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
		    lkp->lk_exclusivecount, lkp->lk_sharecount); \
		if (lkp->lk_exclusivecount) \
			lock_printf("held by CPU %lu\n", \
			    (u_long) lkp->lk_cpu); \
		if (lkp->lk_lock_file) \
			lock_printf("last locked at %s:%d\n", \
			    lkp->lk_lock_file, lkp->lk_lock_line); \
		if (lkp->lk_unlock_file) \
			lock_printf("last unlocked at %s:%d\n", \
			    lkp->lk_unlock_file, lkp->lk_unlock_line); \
		SLOCK_TRACE(); \
		SPINLOCK_SPINCHECK_DEBUGGER; \
	} \
} while (/*CONSTCOND*/ 0)
#else
#define SPINLOCK_SPINCHECK_DECL	/* nothing */
#define SPINLOCK_SPINCHECK	/* nothing */
#endif /* LOCKDEBUG && DDB */

/*
 * Acquire a resource.
 */
static int
acquire(__volatile struct lock *lkp, int *s, int extflags,
    int drain, int wanted)
{
	int error;

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	if (extflags & LK_SPIN) {
		int interlocked;

		SPINLOCK_SPINCHECK_DECL;

		if (!drain) {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		for (interlocked = 1;;) {
			SPINLOCK_SPINCHECK;
			if ((lkp->lk_flags & wanted) != 0) {
				if (interlocked) {
					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
					interlocked = 0;
				}
				SPINLOCK_SPIN_HOOK;
			} else if (interlocked) {
				break;
			} else {
				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
				interlocked = 1;
			}
		}
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		KASSERT((lkp->lk_flags & wanted) == 0);
		error = 0;	/* sanity */
	} else {
		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
			if (drain)
				lkp->lk_flags |= LK_WAITDRAIN;
			else {
				lkp->lk_waitcount++;
				lkp->lk_flags |= LK_WAIT_NONZERO;
			}
			/* XXX Cast away volatile. */
			error = ltsleep(drain ?
			    (void *)&lkp->lk_flags :
			    (void *)lkp, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
			if (!drain) {
				lkp->lk_waitcount--;
				if (lkp->lk_waitcount == 0)
					lkp->lk_flags &= ~LK_WAIT_NONZERO;
			}
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
		}
	}

	return error;
}

#define SETHOLDER(lkp, pid, lid, cpu_id) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) \
		(lkp)->lk_cpu = cpu_id; \
	else { \
		(lkp)->lk_lockholder = pid; \
		(lkp)->lk_locklwp = lid; \
	} \
} while (/*CONSTCOND*/0)

#define WEHOLDIT(lkp, pid, lid, cpu_id) \
	(((lkp)->lk_flags & LK_SPIN) != 0 ? \
	    ((lkp)->lk_cpu == (cpu_id)) : \
	    ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define WAKEUP_WAITER(lkp) \
do { \
	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) == \
	    LK_WAIT_NONZERO) { \
		/* XXX Cast away volatile. */ \
		wakeup((void *)(lkp)); \
	} \
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define SPINLOCK_LIST_LOCK() \
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define SPINLOCK_LIST_UNLOCK() \
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define SPINLOCK_LIST_LOCK()	/* nothing */

#define SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define HAVEIT(lkp) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) { \
		int s = spllock(); \
		SPINLOCK_LIST_LOCK(); \
		/* XXX Cast away volatile. */ \
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
		    lk_list); \
		SPINLOCK_LIST_UNLOCK(); \
		splx(s); \
	} \
} while (/*CONSTCOND*/0)

#define DONTHAVEIT(lkp) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) { \
		int s = spllock(); \
		SPINLOCK_LIST_LOCK(); \
		/* XXX Cast away volatile. */ \
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \
		    lk_list); \
		SPINLOCK_LIST_UNLOCK(); \
		splx(s); \
	} \
} while (/*CONSTCOND*/0)
#else
#define HAVEIT(lkp)		/* nothing */

#define DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}
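
/*
 * Illustrative initialization sketch; the lock names, priority and wmesg
 * below are hypothetical, not part of any real subsystem.  A sleep lock
 * takes a sleep priority (e.g. PINOD from <sys/param.h>) and a timeout,
 * while a spin lock passes LK_SPIN and ignores both.  lockstatus() then
 * reports LK_EXCLUSIVE, LK_SHARED or 0 for either kind.
 *
 *	struct lock foo_sleeplock, foo_spinlock;
 *
 *	lockinit(&foo_sleeplock, PINOD, "foolck", 0, 0);
 *	lockinit(&foo_spinlock, 0, "foospn", 0, LK_SPIN);
 */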

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			error = acquire(lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		if (lkp->lk_sharecount == 0)
			lkp->lk_flags &= ~LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, &s, extflags, 0, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, &s, extflags, 0,
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock that was released with spinlock_release_all(),
 * re-acquire it and restore the saved recursion count of N.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	error = acquire(lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	error = acquire(lkp, &s, LK_SPIN, 0,
	    LK_SHARE_NONZERO | LK_WANT_UPGRADE);
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define SLOCK_LIST_LOCK() \
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define SLOCK_LIST_UNLOCK() \
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define SLOCK_COUNT(x) \
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define SLOCK_LIST_LOCK()	/* nothing */

#define SLOCK_LIST_UNLOCK()	/* nothing */

#define SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on CPU %ld\n", \
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define SLOCK_WHERE(str, alp, id, l) \
do { \
	lock_printf("\n"); \
	lock_printf(str); \
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP(); \
	if ((alp)->lock_file != NULL) \
		lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
		    (alp)->lock_line); \
	if ((alp)->unlock_file != NULL) \
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line); \
	SLOCK_TRACE() \
	SLOCK_DEBUGGER(); \
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct lock kernel_lock;

void
_kernel_lock_init(void)
{

	spinlockinit(&kernel_lock, "klock", 0);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, flag, 0);
}

void
_kernel_unlock(void)
{

	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct lwp *l)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
	l->l_flag |= L_BIGLOCK;
}

void
_kernel_proc_unlock(struct lwp *l)
{

	l->l_flag &= ~L_BIGLOCK;
	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
#endif /* MULTIPROCESSOR */
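
/*
 * Usage sketch for the kernel lock helpers above (MULTIPROCESSOR only).
 * Callers are normally expected to go through the wrapper macros in
 * <sys/lock.h> rather than calling these functions directly; the direct
 * calls below are only illustrative.
 *
 *	_kernel_proc_lock(curlwp);	// top half, on behalf of an LWP
 *	...
 *	_kernel_proc_unlock(curlwp);
 *
 *	_kernel_lock(LK_EXCLUSIVE);	// scheduler / lower half
 *	...
 *	_kernel_unlock();
 */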