1 /* 2 * Copyright (c) 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Copyright (C) 1997 6 * John S. Dyson. All rights reserved. 7 * 8 * This code contains ideas from software contributed to Berkeley by 9 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating 10 * System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95 41 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $ 42 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $ 43 */ 44 45 #include "opt_lint.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/kernel.h> 50 #include <sys/proc.h> 51 #include <sys/lock.h> 52 #include <sys/sysctl.h> 53 #include <sys/spinlock.h> 54 #include <sys/thread2.h> 55 #include <sys/spinlock2.h> 56 57 /* 58 * Locking primitives implementation. 59 * Locks provide shared/exclusive sychronization. 60 */ 61 62 #ifdef SIMPLELOCK_DEBUG 63 #define COUNT(td, x) (td)->td_locks += (x) 64 #else 65 #define COUNT(td, x) 66 #endif 67 68 #define LOCK_WAIT_TIME 100 69 #define LOCK_SAMPLE_WAIT 7 70 71 #if defined(DIAGNOSTIC) 72 #define LOCK_INLINE 73 #else 74 #define LOCK_INLINE __inline 75 #endif 76 77 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \ 78 LK_SHARE_NONZERO | LK_WAIT_NONZERO) 79 80 static int acquire(struct lock *lkp, int extflags, int wanted); 81 82 static LOCK_INLINE void 83 sharelock(struct lock *lkp, int incr) { 84 lkp->lk_flags |= LK_SHARE_NONZERO; 85 lkp->lk_sharecount += incr; 86 } 87 88 static LOCK_INLINE int 89 shareunlock(struct lock *lkp, int decr) 90 { 91 int dowakeup = 0; 92 93 KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr")); 94 95 if (lkp->lk_sharecount == decr) { 96 lkp->lk_flags &= ~LK_SHARE_NONZERO; 97 if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) { 98 dowakeup = 1; 99 } 100 lkp->lk_sharecount = 0; 101 } else { 102 lkp->lk_sharecount -= decr; 103 } 104 return(dowakeup); 105 } 106 107 /* 108 * lock acquisition helper routine. Called with the lock's spinlock held. 109 */ 110 static int 111 acquire(struct lock *lkp, int extflags, int wanted) 112 { 113 int error; 114 115 if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) { 116 return EBUSY; 117 } 118 119 while ((lkp->lk_flags & wanted) != 0) { 120 lkp->lk_flags |= LK_WAIT_NONZERO; 121 lkp->lk_waitcount++; 122 123 /* 124 * Atomic spinlock release/sleep/reacquire. 125 */ 126 error = ssleep(lkp, &lkp->lk_spinlock, 127 ((extflags & LK_PCATCH) ? PCATCH : 0), 128 lkp->lk_wmesg, 129 ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0)); 130 if (lkp->lk_waitcount == 1) { 131 lkp->lk_flags &= ~LK_WAIT_NONZERO; 132 lkp->lk_waitcount = 0; 133 } else { 134 lkp->lk_waitcount--; 135 } 136 if (error) 137 return error; 138 if (extflags & LK_SLEEPFAIL) 139 return ENOLCK; 140 } 141 return 0; 142 } 143 144 /* 145 * Set, change, or release a lock. 146 * 147 * Shared requests increment the shared count. Exclusive requests set the 148 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already 149 * accepted shared locks and shared-to-exclusive upgrades to go away. 150 * 151 * A spinlock is held for most of the procedure. We must not do anything 152 * fancy while holding the spinlock. 153 */ 154 int 155 #ifndef DEBUG_LOCKS 156 lockmgr(struct lock *lkp, u_int flags) 157 #else 158 debuglockmgr(struct lock *lkp, u_int flags, 159 const char *name, const char *file, int line) 160 #endif 161 { 162 thread_t td; 163 int error; 164 int extflags; 165 int dowakeup; 166 static int didpanic; 167 168 error = 0; 169 dowakeup = 0; 170 171 if (mycpu->gd_intr_nesting_level && 172 (flags & LK_NOWAIT) == 0 && 173 (flags & LK_TYPE_MASK) != LK_RELEASE && didpanic == 0) { 174 175 #ifndef DEBUG_LOCKS 176 didpanic = 1; 177 panic("lockmgr %s from %p: called from interrupt, ipi, " 178 "or hard code section", 179 lkp->lk_wmesg, ((int **)&lkp)[-1]); 180 #else 181 didpanic = 1; 182 panic("lockmgr %s from %s:%d: called from interrupt, ipi, " 183 "or hard code section", 184 lkp->lk_wmesg, file, line); 185 #endif 186 } 187 188 /* 189 * So sue me, I'm too tired. 190 */ 191 if (spin_trylock_wr(&lkp->lk_spinlock) == FALSE) { 192 if (flags & LK_NOSPINWAIT) 193 return(EBUSY); 194 spin_lock_wr(&lkp->lk_spinlock); 195 } 196 197 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; 198 td = curthread; 199 200 switch (flags & LK_TYPE_MASK) { 201 case LK_SHARED: 202 /* 203 * If we are not the exclusive lock holder, we have to block 204 * while there is an exclusive lock holder or while an 205 * exclusive lock request or upgrade request is in progress. 206 * 207 * However, if P_DEADLKTREAT is set, we override exclusive 208 * lock requests or upgrade requests ( but not the exclusive 209 * lock itself ). 210 */ 211 if (lkp->lk_lockholder != td) { 212 if (td->td_flags & TDF_DEADLKTREAT) { 213 error = acquire( 214 lkp, 215 extflags, 216 LK_HAVE_EXCL 217 ); 218 } else { 219 error = acquire( 220 lkp, 221 extflags, 222 LK_HAVE_EXCL | LK_WANT_EXCL | 223 LK_WANT_UPGRADE 224 ); 225 } 226 if (error) 227 break; 228 sharelock(lkp, 1); 229 COUNT(td, 1); 230 break; 231 } 232 /* 233 * We hold an exclusive lock, so downgrade it to shared. 234 * An alternative would be to fail with EDEADLK. 235 */ 236 sharelock(lkp, 1); 237 COUNT(td, 1); 238 /* fall into downgrade */ 239 240 case LK_DOWNGRADE: 241 if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) { 242 spin_unlock_wr(&lkp->lk_spinlock); 243 panic("lockmgr: not holding exclusive lock"); 244 } 245 sharelock(lkp, lkp->lk_exclusivecount); 246 lkp->lk_exclusivecount = 0; 247 lkp->lk_flags &= ~LK_HAVE_EXCL; 248 lkp->lk_lockholder = LK_NOTHREAD; 249 if (lkp->lk_waitcount) 250 dowakeup = 1; 251 break; 252 253 case LK_EXCLUPGRADE: 254 /* 255 * If another process is ahead of us to get an upgrade, 256 * then we want to fail rather than have an intervening 257 * exclusive access. 258 */ 259 if (lkp->lk_flags & LK_WANT_UPGRADE) { 260 dowakeup = shareunlock(lkp, 1); 261 COUNT(td, -1); 262 error = EBUSY; 263 break; 264 } 265 /* fall into normal upgrade */ 266 267 case LK_UPGRADE: 268 /* 269 * Upgrade a shared lock to an exclusive one. If another 270 * shared lock has already requested an upgrade to an 271 * exclusive lock, our shared lock is released and an 272 * exclusive lock is requested (which will be granted 273 * after the upgrade). If we return an error, the file 274 * will always be unlocked. 275 */ 276 if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) { 277 spin_unlock_wr(&lkp->lk_spinlock); 278 panic("lockmgr: upgrade exclusive lock"); 279 } 280 dowakeup += shareunlock(lkp, 1); 281 COUNT(td, -1); 282 /* 283 * If we are just polling, check to see if we will block. 284 */ 285 if ((extflags & LK_NOWAIT) && 286 ((lkp->lk_flags & LK_WANT_UPGRADE) || 287 lkp->lk_sharecount > 1)) { 288 error = EBUSY; 289 break; 290 } 291 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) { 292 /* 293 * We are first shared lock to request an upgrade, so 294 * request upgrade and wait for the shared count to 295 * drop to zero, then take exclusive lock. 296 */ 297 lkp->lk_flags |= LK_WANT_UPGRADE; 298 error = acquire(lkp, extflags, LK_SHARE_NONZERO); 299 lkp->lk_flags &= ~LK_WANT_UPGRADE; 300 301 if (error) 302 break; 303 lkp->lk_flags |= LK_HAVE_EXCL; 304 lkp->lk_lockholder = td; 305 if (lkp->lk_exclusivecount != 0) { 306 spin_unlock_wr(&lkp->lk_spinlock); 307 panic("lockmgr: non-zero exclusive count"); 308 } 309 lkp->lk_exclusivecount = 1; 310 #if defined(DEBUG_LOCKS) 311 lkp->lk_filename = file; 312 lkp->lk_lineno = line; 313 lkp->lk_lockername = name; 314 #endif 315 COUNT(td, 1); 316 break; 317 } 318 /* 319 * Someone else has requested upgrade. Release our shared 320 * lock, awaken upgrade requestor if we are the last shared 321 * lock, then request an exclusive lock. 322 */ 323 if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) == 324 LK_WAIT_NONZERO) { 325 ++dowakeup; 326 } 327 /* fall into exclusive request */ 328 329 case LK_EXCLUSIVE: 330 if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) { 331 /* 332 * Recursive lock. 333 */ 334 if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) { 335 spin_unlock_wr(&lkp->lk_spinlock); 336 panic("lockmgr: locking against myself"); 337 } 338 if ((extflags & LK_CANRECURSE) != 0) { 339 lkp->lk_exclusivecount++; 340 COUNT(td, 1); 341 break; 342 } 343 } 344 /* 345 * If we are just polling, check to see if we will sleep. 346 */ 347 if ((extflags & LK_NOWAIT) && 348 (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) { 349 error = EBUSY; 350 break; 351 } 352 /* 353 * Try to acquire the want_exclusive flag. 354 */ 355 error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL)); 356 if (error) 357 break; 358 lkp->lk_flags |= LK_WANT_EXCL; 359 /* 360 * Wait for shared locks and upgrades to finish. 361 */ 362 error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO); 363 lkp->lk_flags &= ~LK_WANT_EXCL; 364 if (error) 365 break; 366 lkp->lk_flags |= LK_HAVE_EXCL; 367 lkp->lk_lockholder = td; 368 if (lkp->lk_exclusivecount != 0) { 369 spin_unlock_wr(&lkp->lk_spinlock); 370 panic("lockmgr: non-zero exclusive count"); 371 } 372 lkp->lk_exclusivecount = 1; 373 #if defined(DEBUG_LOCKS) 374 lkp->lk_filename = file; 375 lkp->lk_lineno = line; 376 lkp->lk_lockername = name; 377 #endif 378 COUNT(td, 1); 379 break; 380 381 case LK_RELEASE: 382 if (lkp->lk_exclusivecount != 0) { 383 if (lkp->lk_lockholder != td && 384 lkp->lk_lockholder != LK_KERNTHREAD) { 385 spin_unlock_wr(&lkp->lk_spinlock); 386 panic("lockmgr: pid %d, not %s thr %p/%p unlocking", 387 (td->td_proc ? td->td_proc->p_pid : -1), 388 "exclusive lock holder", 389 td, lkp->lk_lockholder); 390 } 391 if (lkp->lk_lockholder != LK_KERNTHREAD) { 392 COUNT(td, -1); 393 } 394 if (lkp->lk_exclusivecount == 1) { 395 lkp->lk_flags &= ~LK_HAVE_EXCL; 396 lkp->lk_lockholder = LK_NOTHREAD; 397 lkp->lk_exclusivecount = 0; 398 } else { 399 lkp->lk_exclusivecount--; 400 } 401 } else if (lkp->lk_flags & LK_SHARE_NONZERO) { 402 dowakeup += shareunlock(lkp, 1); 403 COUNT(td, -1); 404 } 405 if (lkp->lk_flags & LK_WAIT_NONZERO) 406 ++dowakeup; 407 break; 408 409 default: 410 spin_unlock_wr(&lkp->lk_spinlock); 411 panic("lockmgr: unknown locktype request %d", 412 flags & LK_TYPE_MASK); 413 /* NOTREACHED */ 414 } 415 spin_unlock_wr(&lkp->lk_spinlock); 416 if (dowakeup) 417 wakeup(lkp); 418 return (error); 419 } 420 421 void 422 lockmgr_kernproc(struct lock *lp) 423 { 424 struct thread *td __debugvar = curthread; 425 426 if (lp->lk_lockholder != LK_KERNTHREAD) { 427 KASSERT(lp->lk_lockholder == td, 428 ("lockmgr_kernproc: lock not owned by curthread %p", td)); 429 COUNT(td, -1); 430 lp->lk_lockholder = LK_KERNTHREAD; 431 } 432 } 433 434 /* 435 * Set the lock to be exclusively held. The caller is holding the lock's 436 * spinlock and the spinlock remains held on return. A panic will occur 437 * if the lock cannot be set to exclusive. 438 */ 439 void 440 lockmgr_setexclusive_interlocked(struct lock *lkp) 441 { 442 thread_t td = curthread; 443 444 KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0); 445 KKASSERT(lkp->lk_exclusivecount == 0); 446 lkp->lk_flags |= LK_HAVE_EXCL; 447 lkp->lk_lockholder = td; 448 lkp->lk_exclusivecount = 1; 449 COUNT(td, 1); 450 } 451 452 /* 453 * Clear the caller's exclusive lock. The caller is holding the lock's 454 * spinlock. THIS FUNCTION WILL UNLOCK THE SPINLOCK. 455 * 456 * A panic will occur if the caller does not hold the lock. 457 */ 458 void 459 lockmgr_clrexclusive_interlocked(struct lock *lkp) 460 { 461 thread_t td __debugvar = curthread; 462 int dowakeup = 0; 463 464 KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1 465 && lkp->lk_lockholder == td); 466 lkp->lk_lockholder = LK_NOTHREAD; 467 lkp->lk_flags &= ~LK_HAVE_EXCL; 468 lkp->lk_exclusivecount = 0; 469 if (lkp->lk_flags & LK_WAIT_NONZERO) 470 dowakeup = 1; 471 COUNT(td, -1); 472 spin_unlock_wr(&lkp->lk_spinlock); 473 if (dowakeup) 474 wakeup((void *)lkp); 475 } 476 477 /* 478 * Initialize a lock; required before use. 479 */ 480 void 481 lockinit(struct lock *lkp, char *wmesg, int timo, int flags) 482 { 483 spin_init(&lkp->lk_spinlock); 484 lkp->lk_flags = (flags & LK_EXTFLG_MASK); 485 lkp->lk_sharecount = 0; 486 lkp->lk_waitcount = 0; 487 lkp->lk_exclusivecount = 0; 488 lkp->lk_wmesg = wmesg; 489 lkp->lk_timo = timo; 490 lkp->lk_lockholder = LK_NOTHREAD; 491 } 492 493 /* 494 * Reinitialize a lock that is being reused for a different purpose, but 495 * which may have pending (blocked) threads sitting on it. The caller 496 * must already hold the interlock. 497 */ 498 void 499 lockreinit(struct lock *lkp, char *wmesg, int timo, int flags) 500 { 501 spin_lock_wr(&lkp->lk_spinlock); 502 lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) | 503 (flags & LK_EXTFLG_MASK); 504 lkp->lk_wmesg = wmesg; 505 lkp->lk_timo = timo; 506 spin_unlock_wr(&lkp->lk_spinlock); 507 } 508 509 /* 510 * Requires that the caller is the exclusive owner of this lock. 511 */ 512 void 513 lockuninit(struct lock *l) 514 { 515 /* 516 * At this point we should have removed all the references to this lock 517 * so there can't be anyone waiting on it. 518 */ 519 KKASSERT(l->lk_waitcount == 0); 520 521 spin_uninit(&l->lk_spinlock); 522 } 523 524 /* 525 * Determine the status of a lock. 526 */ 527 int 528 lockstatus(struct lock *lkp, struct thread *td) 529 { 530 int lock_type = 0; 531 532 spin_lock_wr(&lkp->lk_spinlock); 533 if (lkp->lk_exclusivecount != 0) { 534 if (td == NULL || lkp->lk_lockholder == td) 535 lock_type = LK_EXCLUSIVE; 536 else 537 lock_type = LK_EXCLOTHER; 538 } else if (lkp->lk_sharecount != 0) { 539 lock_type = LK_SHARED; 540 } 541 spin_unlock_wr(&lkp->lk_spinlock); 542 return (lock_type); 543 } 544 545 /* 546 * Return non-zero if the caller owns the lock shared or exclusive. 547 * We can only guess re: shared locks. 548 */ 549 int 550 lockowned(struct lock *lkp) 551 { 552 thread_t td = curthread; 553 554 if (lkp->lk_exclusivecount) 555 return(lkp->lk_lockholder == td); 556 return(lkp->lk_sharecount != 0); 557 } 558 559 /* 560 * Determine the number of holders of a lock. 561 * 562 * The non-blocking version can usually be used for assertions. 563 */ 564 int 565 lockcount(struct lock *lkp) 566 { 567 int count; 568 569 spin_lock_wr(&lkp->lk_spinlock); 570 count = lkp->lk_exclusivecount + lkp->lk_sharecount; 571 spin_unlock_wr(&lkp->lk_spinlock); 572 return (count); 573 } 574 575 int 576 lockcountnb(struct lock *lkp) 577 { 578 return (lkp->lk_exclusivecount + lkp->lk_sharecount); 579 } 580 581 /* 582 * Print out information about state of a lock. Used by VOP_PRINT 583 * routines to display status about contained locks. 584 */ 585 void 586 lockmgr_printinfo(struct lock *lkp) 587 { 588 struct thread *td = lkp->lk_lockholder; 589 struct proc *p; 590 591 if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD) 592 p = td->td_proc; 593 else 594 p = NULL; 595 596 if (lkp->lk_sharecount) 597 kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg, 598 lkp->lk_sharecount); 599 else if (lkp->lk_flags & LK_HAVE_EXCL) 600 kprintf(" lock type %s: EXCL (count %d) by td %p pid %d", 601 lkp->lk_wmesg, lkp->lk_exclusivecount, td, 602 p ? p->p_pid : -99); 603 if (lkp->lk_waitcount > 0) 604 kprintf(" with %d pending", lkp->lk_waitcount); 605 } 606 607