/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.8 (Berkeley) 04/27/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive sychronization.
 */

#if NCPUS > 1

/*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 *
 * NOTE: PAUSE briefly drops the interlock while spinning on the
 * "wanted" condition, then reacquires it; if the condition clears,
 * the trailing "break" exits the caller's ACQUIRE loop (or, for
 * LK_DRAIN, the caller's own surrounding context).  PAUSE is only
 * safe to expand where a bare "break" is meaningful.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
	if (lock_wait_time > 0) {					\
		int i;							\
									\
		simple_unlock(&lkp->lk_interlock);			\
		for (i = lock_wait_time; i > 0; i--)			\
			if (!(wanted))					\
				break;					\
		simple_lock(&lkp->lk_interlock);			\
	}								\
	if (!(wanted))							\
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

/*
 * Panic messages for inline expanded simple locks.
 * Put text here to avoid hundreds of copies.
 */
const char *simple_lock_held = "simple_lock: lock held";
const char *simple_lock_not_held = "simple_lock: lock not held";

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 *
 * Spin briefly (PAUSE, MP only), then sleep on the lock address until
 * the "wanted" condition clears.  The interlock is dropped across the
 * tsleep and reacquired afterward; lk_waitcount tracks sleepers so
 * releasers know whether a wakeup is needed.  Sets "error" non-zero on
 * tsleep failure, or ENOLCK if LK_SLEEPFAIL asks that any sleep fail.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

/*
 * Initialize a lock; required before use.
 *
 * prio/wmesg/timo are the tsleep parameters used whenever a process
 * must wait for this lock; flags supplies the externally settable
 * lock flags (masked by LK_EXTFLG_MASK).
 */
void
lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Determine the status of a lock.
 *
 * Returns LK_EXCLUSIVE, LK_SHARED, or 0 if the lock is not held.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * If LK_INTERLOCK is set in flags, "interlkp" is an already-held simple
 * lock that is released once this lock's own interlock is acquired.
 * Returns 0 on success or an errno (EBUSY, ENOLCK, or a tsleep error).
 */
int
lockmgr(lkp, flags, interlkp, pid)
	__volatile struct lock *lkp;
	u_int flags;
	struct simple_lock *interlkp;
	pid_t pid;
{
	int error;
	__volatile int extflags;

	error = 0;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	/*
	 * A drained lock may only be released, and only by the process
	 * that drained it (see LK_DRAIN below).
	 */
	if ((lkp->lk_flags & LK_DRAINED) &&
	    (((flags & LK_TYPE_MASK) != LK_RELEASE) ||
	    lkp->lk_lockholder != pid))
		panic("lockmgr: using decommissioned lock");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		/* Convert all recursive exclusive holds to shared holds. */
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Wait for all activity (holders and waiters) on the lock
		 * to cease, then mark it decommissioned (LK_DRAINED) and
		 * held exclusively by us; any later use other than our own
		 * LK_RELEASE panics in the check above.  Note that drain
		 * waiters sleep on &lkp->lk_flags, a different channel from
		 * ordinary waiters, and are not counted in lk_waitcount.
		 *
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			/*
			 * NB: these two error returns leave the interlock
			 * released, unlike the "break" paths above.
			 */
			if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo))
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINED | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/*
	 * If a drainer is waiting and the lock has gone fully idle,
	 * wake it on its private channel (&lkp->lk_flags).
	 */
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}

	simple_unlock(&lkp->lk_interlock);
	return (error);
}

/*
 * Print a one-line summary of a lock's state (shared/exclusive holder
 * and number of waiters).  Reads the lock without taking the interlock,
 * so it is a diagnostic snapshot only.
 */
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED", lkp->lk_wmesg);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
		    lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}