/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.4 (Berkeley) 04/11/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted) \
	if (lock_wait_time > 0) { \
		int i; \
 \
		atomic_unlock(&lkp->lk_interlock); \
		for (i = lock_wait_time; i > 0; i--) \
			if (!(wanted)) \
				break; \
		atomic_lock(&lkp->lk_interlock); \
	} \
	if (!(wanted)) \
		break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted) \
	PAUSE(lkp, wanted); \
	for (error = 0; wanted; ) { \
		(lkp)->lk_flags |= LK_WAITING; \
		atomic_unlock(&(lkp)->lk_interlock); \
		error = tsleep((void *)lkp, (lkp)->lk_prio, \
		    (lkp)->lk_wmesg, (lkp)->lk_timo); \
		atomic_lock(&(lkp)->lk_interlock); \
		if (error) \
			break; \
		if ((extflags) & LK_SLEEPFAIL) { \
			error = ENOLCK; \
			break; \
		} \
	}

/*
 * Initialize a lock; required before use.
 */
void lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
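
/*
 * Usage sketch (illustrative addition, not part of the original source):
 * a lock must be initialized with lock_init() before its first use.
 * The lock name, the "lockex" wait message, and the PINOD sleep priority
 * below are assumptions chosen only for this example.
 *
 *	struct lock examplelock;
 *
 *	lock_init(&examplelock, PINOD, "lockex", 0, 0);
 */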

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	atomic_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	atomic_unlock(&lkp->lk_interlock);
	return (lock_type);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
lockmgr(lkp, p, flags)
	volatile struct lock *lkp;
	struct proc *p;
	u_int flags;
{
	int error;
	pid_t pid;
	volatile int extflags;

	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
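
/*
 * Example call sequence (illustrative addition, not part of the original
 * source): take a shared lock, upgrade it to exclusive, then release it.
 * Here "examplelock" is a lock set up with lock_init() as sketched above,
 * curproc stands for the calling process, and error handling is abbreviated.
 * Note that if the upgrade fails, the shared lock has already been released,
 * so no LK_RELEASE is needed on the error path.
 *
 *	int error;
 *
 *	error = lockmgr(&examplelock, curproc, LK_SHARED);
 *	if (error == 0)
 *		error = lockmgr(&examplelock, curproc, LK_UPGRADE);
 *	if (error == 0)
 *		(void) lockmgr(&examplelock, curproc, LK_RELEASE);
 */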