/*	$NetBSD: kern_lock.c,v 1.88 2005/06/01 13:12:49 blymn Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.88 2005/06/01 13:12:49 blymn Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

static int acquire(__volatile struct lock **, int *, int, int, int);

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif	/* defined(LOCKDEBUG) */

#if defined(MULTIPROCESSOR)
struct simplelock kernel_lock;
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)	\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, l, cpu_id, x)	\
do {	\
	if ((lkp)->lk_flags & LK_SPIN)	\
		COUNT_CPU((cpu_id), (x));	\
	else	\
		(l)->l_locks += (x);	\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)	\
do {	\
	if ((flags) & LK_SPIN)	\
		s = spllock();	\
	simple_lock(&(lkp)->lk_interlock);	\
} while (/*CONSTCOND*/ 0)

#define	INTERLOCK_RELEASE(lkp, flags, s)	\
do {	\
	simple_unlock(&(lkp)->lk_interlock);	\
	if ((flags) & LK_SPIN)	\
		splx(s);	\
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()	\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL	\
	/* 32-bits of count -- wrap constitutes a "spinout" */	\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK	\
do {	\
	if (++__spinc == 0) {	\
		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)	\
			lock_printf("held by CPU %lu\n",	\
			    (u_long) lkp->lk_cpu);	\
		if (lkp->lk_lock_file)	\
			lock_printf("last locked at %s:%d\n",	\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)	\
			lock_printf("last unlocked at %s:%d\n",	\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SLOCK_TRACE();	\
		SPINLOCK_SPINCHECK_DEBUGGER;	\
	}	\
} while (/*CONSTCOND*/ 0)
#else
#define	SPINLOCK_SPINCHECK_DECL		/* nothing */
#define	SPINLOCK_SPINCHECK		/* nothing */
#endif /* LOCKDEBUG && DDB */

/*
 * Acquire a resource.
 */
static int
acquire(__volatile struct lock **lkpp, int *s, int extflags,
    int drain, int wanted)
{
	int error;
	__volatile struct lock *lkp = *lkpp;

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	if (extflags & LK_SPIN) {
		int interlocked;

		SPINLOCK_SPINCHECK_DECL;

		if (!drain) {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		for (interlocked = 1;;) {
			SPINLOCK_SPINCHECK;
			if ((lkp->lk_flags & wanted) != 0) {
				if (interlocked) {
					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
					interlocked = 0;
				}
				SPINLOCK_SPIN_HOOK;
			} else if (interlocked) {
				break;
			} else {
				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
				interlocked = 1;
			}
		}
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		KASSERT((lkp->lk_flags & wanted) == 0);
		error = 0;	/* sanity */
	} else {
		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
			if (drain)
				lkp->lk_flags |= LK_WAITDRAIN;
			else {
				lkp->lk_waitcount++;
				lkp->lk_flags |= LK_WAIT_NONZERO;
			}
			/* XXX Cast away volatile. */
			error = ltsleep(drain ?
			    (volatile const void *)&lkp->lk_flags :
			    (volatile const void *)lkp, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
			if (!drain) {
				lkp->lk_waitcount--;
				if (lkp->lk_waitcount == 0)
					lkp->lk_flags &= ~LK_WAIT_NONZERO;
			}
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
			if (lkp->lk_newlock != NULL) {
				simple_lock(&lkp->lk_newlock->lk_interlock);
				simple_unlock(&lkp->lk_interlock);
				if (lkp->lk_waitcount == 0)
					wakeup(&lkp->lk_newlock);
				*lkpp = lkp = lkp->lk_newlock;
			}
		}
	}

	return error;
}

#define	SETHOLDER(lkp, pid, lid, cpu_id)	\
do {	\
	if ((lkp)->lk_flags & LK_SPIN)	\
		(lkp)->lk_cpu = cpu_id;	\
	else {	\
		(lkp)->lk_lockholder = pid;	\
		(lkp)->lk_locklwp = lid;	\
	}	\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id)	\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?	\
	 ((lkp)->lk_cpu == (cpu_id)) :	\
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define	WAKEUP_WAITER(lkp)	\
do {	\
	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==	\
	    LK_WAIT_NONZERO) {	\
		wakeup((lkp));	\
	}	\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()	\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()	\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

_TAILQ_HEAD(, struct lock, __volatile) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)	\
do {	\
	if ((lkp)->lk_flags & LK_SPIN) {	\
		int sp = spllock();	\
		SPINLOCK_LIST_LOCK();	\
		TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list);	\
		SPINLOCK_LIST_UNLOCK();	\
		splx(sp);	\
	}	\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)	\
do {	\
	if ((lkp)->lk_flags & LK_SPIN) {	\
		int sp = spllock();	\
		SPINLOCK_LIST_LOCK();	\
		TAILQ_REMOVE(&spinlock_list, (lkp), lk_list);	\
		SPINLOCK_LIST_UNLOCK();	\
		splx(sp);	\
	}	\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to);
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0);
}


/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_newlock = NULL;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

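/*
 * Illustration only (editor's sketch, not part of the original code and
 * never compiled): a minimal example of how a subsystem might initialize
 * a sleep lock with lockinit() and take it shared and exclusive through
 * lockmgr().  The "example_softc" structure and "example_*" functions are
 * hypothetical; error handling is elided.
 */
#if 0
struct example_softc {
	struct lock	sc_lock;
	int		sc_state;
};

static void
example_attach(struct example_softc *sc)
{

	/* Sleep lock: waiters sleep at PRIBIO, wait message "exlock". */
	lockinit(&sc->sc_lock, PRIBIO, "exlock", 0, 0);
}

static void
example_update(struct example_softc *sc, int new_state)
{

	/* Exclusive access for modification... */
	(void) lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL);
	sc->sc_state = new_state;
	(void) lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
}

static int
example_read(struct example_softc *sc)
{
	int state;

	/* ...shared access for readers. */
	(void) lockmgr(&sc->sc_lock, LK_SHARED, NULL);
	state = sc->sc_state;
	(void) lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
	return state;
}
#endif /* 0 */
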
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0; /* XXX: gcc */
	int lock_type = 0;
	struct lwp *l = curlwp; /* XXX */
	pid_t pid;
	lwpid_t lid;
	cpuid_t cpu_num;

	if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
		cpu_num = cpu_number();
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		cpu_num = LK_NOCPU;
		pid = l->l_proc->p_pid;
		lid = l->l_lid;
	}

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0) {
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

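/*
 * Illustration only (editor's sketch, not compiled): the IPL discipline
 * described in the "Locks and IPLs" comment above, for a simple_lock that
 * is also taken from network interrupt context.  "example_intr_lock" and
 * the queue it protects are hypothetical.
 */
#if 0
static struct simplelock example_intr_lock = SIMPLELOCK_INITIALIZER;

static void
example_enqueue_from_thread(void)
{
	int s;

	/*
	 * Block the interrupts that may also take this lock *before*
	 * acquiring it, so an interrupt handler cannot spin against us
	 * on the same CPU.
	 */
	s = splnet();
	simple_lock(&example_intr_lock);
	/* ... manipulate the shared queue ... */
	simple_unlock(&example_intr_lock);
	splx(s);
}
#endif /* 0 */
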
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_num;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	/* LK_RETRY is for vn_lock, not for lockmgr. */
	KASSERT((flags & LK_RETRY) == 0);

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_num = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			error = acquire(&lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		if (lkp->lk_sharecount == 0)
			lkp->lk_flags &= ~LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				WAKEUP_WAITER(lkp);
				break;
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			WAKEUP_WAITER(lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_num, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_num, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(&lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup(&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_num;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_num = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_num, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_num, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

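/*
 * Illustration only (editor's sketch, not compiled): how the two helpers
 * above and below are intended to pair up around a context switch, per
 * their comments.  "example_switch" and the lock passed to it are
 * hypothetical.
 */
#if 0
static void
example_switch(struct lock *example_spinlock)
{
	int count;

	/* Drop all recursive holds before switching away... */
	count = spinlock_release_all(example_spinlock);

	/* ... the context switch happens here ... */

	/* ...and re-take the same number of holds on resume. */
	spinlock_acquire_count(example_spinlock, count);
}
#endif /* 0 */
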
/*
 * For a recursive spinlock, re-acquire `count' holds on behalf of the
 * current CPU in one operation (the value previously returned by
 * spinlock_release_all()).
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_num;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_num = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_num);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	error = acquire(&lkp, &s, LK_SPIN, 0,
	    LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE);
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_num, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}



/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

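/*
 * Illustration only (editor's sketch, not compiled): the sort of
 * VOP_PRINT-style caller the comment above refers to.  "example_node"
 * and "example_print" are hypothetical.
 */
#if 0
struct example_node {
	struct lock	en_lock;
	/* ... */
};

static void
example_print(struct example_node *en)
{

	printf("example node %p", en);
	lockmgr_printinfo(&en->en_lock);
	printf("\n");
}
#endif /* 0 */
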
#if defined(LOCKDEBUG) /* { */
_TAILQ_HEAD(, struct simplelock, __volatile) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()	\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()	\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)	\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on CPU %ld\n",	\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)	\
do {	\
	lock_printf("\n");	\
	lock_printf(str);	\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l));	\
	SLOCK_MP();	\
	if ((alp)->lock_file != NULL)	\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);	\
	if ((alp)->unlock_file != NULL)	\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file,	\
		    (alp)->unlock_line);	\
	SLOCK_TRACE()	\
	SLOCK_DEBUGGER();	\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(__volatile struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_num = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_num) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_num;

	SLOCK_LIST_LOCK();
	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

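/*
 * Illustration only (editor's sketch, not compiled): callers do not invoke
 * _simple_lock() directly; they use the simple_lock() wrappers, which under
 * LOCKDEBUG are assumed here (based on <sys/lock.h>) to pass the call site
 * down as the id/l arguments used by the diagnostics above.  The
 * "example_*" names are hypothetical.
 */
#if 0
/* Roughly what the LOCKDEBUG wrapper is assumed to expand to: */
#define	example_simple_lock(alp)	_simple_lock((alp), __FILE__, __LINE__)

static struct simplelock example_slock = SIMPLELOCK_INITIALIZER;

static void
example_use(void)
{

	example_simple_lock(&example_slock);	/* records file/line, holder */
	/* ... critical section ... */
	simple_unlock(&example_slock);
}
#endif /* 0 */
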
int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_num = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_num);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_num);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_num = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_num)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_num;

	SLOCK_LIST_LOCK();
	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	__volatile struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	__volatile struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((__volatile void *)alp >= start &&
		    (__volatile void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	__volatile struct simplelock *alp;
	cpuid_t cpu_num = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
#if defined(MULTIPROCESSOR)
		if (alp == &kernel_lock)
			continue;
#endif /* defined(MULTIPROCESSOR) */
		if (alp->lock_holder == cpu_num)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock. We put them here
 * so that they show up in profiles.
 */

/*
 * splbiglock: block IPLs which need to grab kernel_lock.
 * XXX splvm or splaudio should be enough.
 */
#if !defined(__HAVE_SPLBIGLOCK)
#define	splbiglock()	splclock()
#endif

void
_kernel_lock_init(void)
{

	simple_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock. Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{
	struct cpu_info *ci = curcpu();

	SCHED_ASSERT_UNLOCKED();

	if (ci->ci_data.cpu_biglock_count > 0) {
		LOCK_ASSERT(simple_lock_held(&kernel_lock));
		ci->ci_data.cpu_biglock_count++;
	} else {
		int s;

		s = splbiglock();
		while (!simple_lock_try(&kernel_lock)) {
			splx(s);
			SPINLOCK_SPIN_HOOK;
			s = splbiglock();
		}
		ci->ci_data.cpu_biglock_count++;
		splx(s);
	}
}

void
_kernel_unlock(void)
{
	struct cpu_info *ci = curcpu();
	int s;

	KASSERT(ci->ci_data.cpu_biglock_count > 0);

	s = splbiglock();
	if ((--ci->ci_data.cpu_biglock_count) == 0)
		simple_unlock(&kernel_lock);
	splx(s);
}

/*
 * Acquire/release the kernel_lock on behalf of a process. Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct lwp *l)
{

	SCHED_ASSERT_UNLOCKED();
	_kernel_lock(0);
}

void
_kernel_proc_unlock(struct lwp *l)
{

	_kernel_unlock();
}

int
_kernel_lock_release_all()
{
	struct cpu_info *ci = curcpu();
	int hold_count;

	hold_count = ci->ci_data.cpu_biglock_count;

	if (hold_count) {
		int s;

		s = splbiglock();
		ci->ci_data.cpu_biglock_count = 0;
		simple_unlock(&kernel_lock);
		splx(s);
	}

	return hold_count;
}

void
_kernel_lock_acquire_count(int hold_count)
{

	KASSERT(curcpu()->ci_data.cpu_biglock_count == 0);

	if (hold_count != 0) {
		struct cpu_info *ci = curcpu();
		int s;

		s = splbiglock();
		while (!simple_lock_try(&kernel_lock)) {
			splx(s);
			SPINLOCK_SPIN_HOOK;
			s = splbiglock();
		}
		ci->ci_data.cpu_biglock_count = hold_count;
		splx(s);
	}
}
#if defined(DEBUG)
void
_kernel_lock_assert_locked()
{

	LOCK_ASSERT(simple_lock_held(&kernel_lock));
}
#endif
#endif /* MULTIPROCESSOR */
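
/*
 * Illustration only (editor's sketch, not compiled): how the kernel_lock
 * entry points above are intended to pair up.  A lower-half handler
 * brackets its work with _kernel_lock()/_kernel_unlock(), while a
 * top-half path uses _kernel_proc_lock()/_kernel_proc_unlock() on behalf
 * of the current LWP.  "example_intr" and "example_syscall_entry" are
 * hypothetical.
 */
#if 0
static void
example_intr(void *arg)
{

	_kernel_lock(0);
	/* ... interrupt-level work that still needs the big lock ... */
	_kernel_unlock();
}

static void
example_syscall_entry(struct lwp *l)
{

	_kernel_proc_lock(l);
	/* ... top-half work ... */
	_kernel_proc_unlock(l);
}
#endif /* 0 */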