/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.7 (Berkeley) 04/27/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try a spin lock first: drop the
 * interlock and poll the wanted condition for up to lock_wait_time
 * iterations before falling back to sleeping.  The acquisition loop
 * that follows each PAUSE re-tests the condition, so the macro itself
 * need not exit early.
 *
 * This should be inline expanded in ACQUIRE below, but we cannot have
 * an #if inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&(lkp)->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&(lkp)->lk_interlock);		\
		}

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

/*
 * Panic messages for inline expanded simple locks.
 * Put text here to avoid hundreds of copies.
 */
const char *simple_lock_held = "simple_lock: lock held";
const char *simple_lock_not_held = "simple_lock: lock not held";

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
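
/*
 * For illustration: the LK_SHARED request in lockmgr() below invokes
 * ACQUIRE with the exclusive/upgrade flags as its wanted condition,
 * which expands roughly to (PAUSE spin elided):
 *
 *	for (error = 0; lkp->lk_flags &
 *	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE); ) {
 *		lkp->lk_waitcount++;
 *		simple_unlock(&lkp->lk_interlock);
 *		error = tsleep((void *)lkp, lkp->lk_prio,
 *		    lkp->lk_wmesg, lkp->lk_timo);
 *		simple_lock(&lkp->lk_interlock);
 *		lkp->lk_waitcount--;
 *		if (error)
 *			break;
 *		if (extflags & LK_SLEEPFAIL) {
 *			error = ENOLCK;
 *			break;
 *		}
 *	}
 */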

/*
 * Initialize a lock; required before use.
 */
void
lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
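
/*
 * A minimal initialization sketch (illustrative values, not from this
 * file): a lock sleeping at PINOD priority with wait message "lockex",
 * no timeout, and no external flags.
 *
 *	struct lock lk;
 *
 *	lock_init(&lk, PINOD, "lockex", 0, 0);
 */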

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
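
/*
 * Example (hypothetical caller): the result is advisory only, since
 * the state may change as soon as the interlock is dropped.
 *
 *	if (lockstatus(&lk) == LK_EXCLUSIVE)
 *		printf("lk: held exclusively\n");
 */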

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, p)
	volatile struct lock *lkp;
	u_int flags;
	struct proc *p;
{
	int error;
	pid_t pid;
	volatile int extflags;

	error = 0;
	pid = p->p_pid;
	simple_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	if (lkp->lk_flags & LK_DRAINED)
		panic("lockmgr: using decommissioned lock");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.  Drop our shared lock, waking the
		 * waiting upgrader if ours was the last one.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
				wakeup((void *)lkp);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block;
		 * we block if another upgrade is pending or if other
		 * processes still hold shared locks.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount != 0)) {
			if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
				wakeup((void *)lkp);
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if (extflags & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINED;
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
347