/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.2 (Berkeley) 04/10/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&lkp->lk_interlock);		\
		}

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource: spin briefly first (multiprocessor only), then sleep
 * until the wanted condition clears.  The sleep loop is abandoned if
 * tsleep() returns an error or if LK_SLEEPFAIL is set.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep(lkp, (lkp)->lk_prio, (lkp)->lk_wmesg,	\
		    (lkp)->lk_timo);					\
		atomic_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_flags |= LK_SLEPT;				\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
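
/*
 * Note on the sleep/wakeup protocol: ACQUIRE sets LK_WAITING before
 * sleeping on the lock address.  LK_DOWNGRADE, LK_RELEASE, and the
 * upgrade path clear LK_WAITING and wakeup(lkp) when the resource may
 * have become available; each waiter then rechecks its wanted condition.
 */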

/*
 * Initialize a lock; required before use.
 */
void lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
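
/*
 * Illustrative sketch only, not part of the original interface: one way a
 * subsystem might embed and use a lock.  The names "example_softc",
 * "example_init", "example_read", and the wait message "exlock" are
 * hypothetical, and PINOD is merely one plausible sleep priority.
 */
#ifdef notdef
struct example_softc {
	struct lock ex_lock;
};

void
example_init(sc)
	struct example_softc *sc;
{

	lock_init(&sc->ex_lock, PINOD, "exlock", 0, 0);
}

int
example_read(sc, p)
	struct example_softc *sc;
	struct proc *p;
{
	int error;

	/* Take a shared lock around the operation, then release it. */
	if (error = lockmgr(&sc->ex_lock, p, LK_SHARED))
		return (error);
	/* ... read the data under the shared lock ... */
	return (lockmgr(&sc->ex_lock, p, LK_RELEASE));
}
#endif /* notdef */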

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
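/*
 * Request types handled below: LK_SHARED, LK_DOWNGRADE, LK_UPGRADE,
 * LK_EXCLUSIVE, and LK_RELEASE.  The external flags LK_NOWAIT, LK_SLEEPFAIL,
 * and LK_CANRECURSE control whether a request may block, whether it fails
 * after having slept, and whether the holder may recurse on its own
 * exclusive lock.
 */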
lockmgr(lkp, p, flags)
	struct lock *lkp;
	struct proc *p;
	int flags;
{
	pid_t pid;
	int error, extflags;

	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	lkp->lk_flags &= ~LK_SLEPT;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
167 		 * will always be unlocked.
168 		 */
169 		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
170 			panic("lockmgr: upgrade exclusive lock");
171 		lkp->lk_sharecount--;
172 		/*
173 		 * If we are just polling, check to see if we will block.
174 		 */
175 		if ((extflags & LK_NOWAIT) &&
176 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
177 		     lkp->lk_sharecount > 1)) {
178 			atomic_unlock(&lkp->lk_interlock);
179 			return (EBUSY);
180 		}
181 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
182 			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}

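/*
 * Illustrative sketch only, not part of the original file: the state
 * transitions handled above, as seen by a caller.  The lock "lk" and the
 * process pointer "p" are hypothetical.
 *
 *	error = lockmgr(&lk, p, LK_SHARED);	shared access
 *	error = lockmgr(&lk, p, LK_UPGRADE);	wait to become the exclusive holder
 *	error = lockmgr(&lk, p, LK_DOWNGRADE);	back to shared access
 *	error = lockmgr(&lk, p, LK_RELEASE);	drop the last reference
 *
 * LK_UPGRADE may fail (for example EBUSY with LK_NOWAIT, or ENOLCK with
 * LK_SLEEPFAIL); as noted above, the shared lock has already been released
 * in that case, so the caller must not release it again.
 */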