/*	$NetBSD: kern_lock.c,v 1.13 1998/12/02 10:41:01 bouyer Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef LOCKDEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if 0 /*#was defined(MULTIPROCESSOR)*/
/*-

This macro is Bad Style and it doesn't work either... [pk, 10-14-1998]

-*
 * For multiprocessor systems, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */

int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* ! MULTIPROCESSOR */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* MULTIPROCESSOR */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
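/*
 * An illustrative sketch (never compiled; "example_acquire" is a
 * hypothetical caller, not part of this file) of how ACQUIRE is
 * used below in lockmgr(): the caller holds lk_interlock, names a
 * "wanted" condition that is re-evaluated on every pass, and still
 * holds lk_interlock when the loop exits.
 */
#ifdef notdef
void
example_acquire(lkp, extflags)
	struct lock *lkp;
	int extflags;
{
	int error;

	simple_lock(&lkp->lk_interlock);
	/*
	 * Sleep until neither an exclusive holder nor an intending
	 * exclusive locker stands in the way; a release path will
	 * wakeup() on lkp when the flags may have cleared.
	 */
	ACQUIRE(lkp, error, extflags, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	if (error == 0) {
		/* condition clear: claim the lock while interlocked */
	}
	simple_unlock(&lkp->lk_interlock);
}
#endif /* notdef */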
/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
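/*
 * An illustrative round trip through the interface above (a sketch
 * under #ifdef notdef, never compiled).  The lock "ex_lock", the
 * PVFS sleep priority, and the "exlock" wait message are
 * hypothetical choices for the example.
 */
#ifdef notdef
struct lock ex_lock;

int
example_shared()
{
	int error;

	lockinit(&ex_lock, PVFS, "exlock", 0, 0);

	/* Blocks until no exclusive holder or upgrader remains. */
	error = lockmgr(&ex_lock, LK_SHARED, NULL);
	if (error)
		return (error);

	/* lockstatus(&ex_lock) would now return LK_SHARED. */

	/* Drop the shared hold, waking any sleeping waiters. */
	return (lockmgr(&ex_lock, LK_RELEASE, NULL));
}
#endif /* notdef */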
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
{
	int error;
	pid_t pid;
	int extflags;
	struct proc *p = curproc;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking", pid,
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)))
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
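/*
 * Two illustrative uses of the less obvious lockmgr() requests, as
 * sketches under #ifdef notdef (never compiled).  "ex_lock" is the
 * hypothetical lock from the earlier example.
 */
#ifdef notdef
/*
 * Trade a shared hold for an exclusive one.  As the LK_UPGRADE
 * comment above notes, a failed upgrade has already given up the
 * shared hold, so on error the lock is not held at all.
 */
int
example_upgrade()
{
	int error;

	error = lockmgr(&ex_lock, LK_SHARED, NULL);
	if (error)
		return (error);
	error = lockmgr(&ex_lock, LK_UPGRADE, NULL);
	if (error)
		return (error);		/* no longer held at all */
	/* ... modify the protected object exclusively ... */
	return (lockmgr(&ex_lock, LK_RELEASE, NULL));
}

/*
 * Drain the lock before freeing the structure containing it.
 * LK_DRAIN waits out every holder and waiter and returns holding
 * the lock exclusively; the only legal follow-up is one
 * LK_RELEASE, after which (under DIAGNOSTIC) any further use
 * panics unless that release passed LK_REENABLE.
 */
int
example_drain()
{
	int error;

	error = lockmgr(&ex_lock, LK_DRAIN, NULL);
	if (error)
		return (error);
	(void) lockmgr(&ex_lock, LK_RELEASE, NULL);
	/* now safe to free the structure containing ex_lock */
	return (0);
}
#endif /* notdef */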
/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
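/*
 * For example, a lock initialized with the (hypothetical) wait
 * message "exlock", currently with two shared holders and one
 * sleeping waiter, would print, per the formats above:
 *
 *	 lock type exlock: SHARED (count 2) with 1 pending
 */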
#if defined(LOCKDEBUG) && !defined(MULTIPROCESSOR)
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
int simplelockrecurse;
LIST_HEAD(slocklist, simplelock) slockdebuglist;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{
	alp->lock_data = 0;
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		printf("simple_lock: lock held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (lockpausetime == -1)
			panic("simple_lock: lock held");
		if (lockpausetime == 1) {
#ifdef BACKTRACE
			BACKTRACE(curproc);
#endif
		} else if (lockpausetime > 1) {
			printf("simple_lock: lock held, pausing...");
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
		return;
	}

	s = splhigh();
	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
	splx(s);

	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;

	s = splhigh();
	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
	splx(s);

	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}
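/*
 * Illustrative use of the try variant (a sketch under #ifdef
 * notdef, never compiled): attempt the lock and fall back to
 * other work rather than spin-waiting, which the comment earlier
 * in this file notes can never succeed on a uniprocessor.
 * "ex_slock" is a hypothetical simplelock.
 */
#ifdef notdef
struct simplelock ex_slock;

void
example_try()
{

	simple_lock_init(&ex_slock);
	if (simple_lock_try(&ex_slock)) {
		/* ... brief critical section ... */
		simple_unlock(&ex_slock);
	}
	/* else: lock was busy; the caller must not block on it */
}
#endif /* notdef */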
void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		printf("simple_unlock: lock not held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (lockpausetime == -1)
			panic("simple_unlock: lock not held");
		if (lockpausetime == 1) {
#ifdef BACKTRACE
			BACKTRACE(curproc);
#endif
		} else if (lockpausetime > 1) {
			printf("simple_unlock: lock not held, pausing...");
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
		return;
	}

	s = splhigh();
	LIST_REMOVE(alp, list);
	alp->list.le_next = NULL;
	alp->list.le_prev = NULL;
	splx(s);

	alp->lock_data = 0;
	alp->unlock_file = id;
	alp->unlock_line = l;
	if (curproc)
		curproc->p_simple_locks--;
}

void
simple_lock_dump()
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	printf("all simple locks:\n");
	for (alp = LIST_FIRST(&slockdebuglist);
	    alp != NULL;
	    alp = LIST_NEXT(alp, list)) {
		printf("%p %s:%d\n", alp, alp->lock_file, alp->lock_line);
	}
	splx(s);
}

void
simple_lock_freecheck(start, end)
	void *start, *end;
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	for (alp = LIST_FIRST(&slockdebuglist);
	    alp != NULL;
	    alp = LIST_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			printf("freeing simple_lock %p\n", alp);
#ifdef DDB
			Debugger();
#endif
		}
	}
	splx(s);
}
#endif /* LOCKDEBUG && ! MULTIPROCESSOR */