/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.3 (Berkeley) 04/11/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try a spin lock first.  If the resource
 * does not become free during the spin, fall through to the sleep in
 * ACQUIRE below.
 *
 * This would be expanded inline in ACQUIRE, but we cannot have a #if
 * inside a multi-line define.
 */
int lock_wait_time = 100;
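/* A lock_wait_time of 0 (or less) disables spinning entirely. */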
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&(lkp)->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&(lkp)->lk_interlock);		\
		}

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.  Entered, and exits, with the interlock held;
 * the interlock is released while sleeping.  The loop re-tests the
 * wanted condition after PAUSE, so PAUSE need only spin.  If
 * LK_SLEEPFAIL is set, the acquisition fails with ENOLCK after any
 * sleep.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		atomic_lock(&(lkp)->lk_interlock);			\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
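/*
 * Example (a sketch, not part of the original interface): to block
 * until no exclusive lock is held, a caller already holding the
 * interlock could write
 *
 *	ACQUIRE(lkp, error, extflags, lkp->lk_flags & LK_HAVE_EXCL);
 *	if (error)
 *		... the wait was interrupted or timed out ...
 *
 * On success the wanted condition is false and the interlock is still
 * held.  The wanted expression must mention only state protected by
 * the interlock, since both PAUSE and ACQUIRE drop it internally.
 */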

/*
 * Initialize a lock; required before use.
 */
void lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
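/*
 * Example (a sketch; the "devlock" name and PINOD priority are merely
 * illustrative): a subsystem embedding one of these locks sets it up
 * once before any lockmgr() call:
 *
 *	struct lock devlock;
 *
 *	lock_init(&devlock, PINOD, "devlock", 0, 0);
 *
 * The prio and wmesg arguments are handed to tsleep() when a request
 * must wait, a timo of 0 means no timeout, and here no external flags
 * are preset.
 */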

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 * Returns 0 on success and an errno value otherwise.
 */
int lockmgr(lkp, p, flags)
	volatile struct lock *lkp;
	struct proc *p;
	u_int flags;
{
	int error;
	pid_t pid;
	volatile int extflags;

	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_EXCLUPGRADE:
		/*
		 * If another process has already requested an upgrade,
		 * fail rather than allow an intervening exclusive
		 * access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
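
/*
 * Example (a sketch; the vnode and its v_lock field are assumptions
 * used for illustration, not defined here): taking a shared lock,
 * upgrading it, and releasing it might look like
 *
 *	if ((error = lockmgr(&vp->v_lock, p, LK_SHARED)) != 0)
 *		return (error);
 *	if ((error = lockmgr(&vp->v_lock, p, LK_UPGRADE)) != 0)
 *		return (error);	(the shared hold is gone on failure)
 *	...modify the protected object...
 *	(void) lockmgr(&vp->v_lock, p, LK_RELEASE);
 *
 * Or'ing LK_NOWAIT into any request polls instead of sleeping; EBUSY
 * is returned if the request would have blocked.
 */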