/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.6 (Berkeley) 04/13/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multi-line define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource: spin briefly first (via PAUSE, on a multiprocessor),
 * then sleep until the wanted condition clears, the sleep fails, or
 * LK_SLEEPFAIL turns a completed sleep into a failure.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		atomic_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

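/*
 * Illustrative example (not compiled): the shared-to-exclusive upgrade
 * path below waits for the shared count to drain with
 *
 *	ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
 *
 * On a multiprocessor this first spins up to lock_wait_time iterations
 * with the interlock released (PAUSE), then repeatedly sleeps on the
 * lock address, bumping lk_waitcount around each tsleep() call, until
 * lk_sharecount reaches zero, tsleep() fails, or LK_SLEEPFAIL converts
 * a completed sleep into ENOLCK.
 */
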
/*
 * Initialize a lock; required before use.
 */
void
lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

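/*
 * Typical initialization, as a sketch only; the embedding structure,
 * sleep priority (PRIBIO from <sys/param.h>), and wait message are
 * illustrative assumptions, not part of this interface:
 *
 *	struct lock vn_lock;
 *
 *	lock_init(&vn_lock, PRIBIO, "vnlock", 0, 0);
 *
 * After the call the lock is unowned, has no sleep timeout, and has no
 * external flags set.
 */
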
/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	atomic_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	atomic_unlock(&lkp->lk_interlock);
	return (lock_type);
}

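/*
 * Illustrative use (hypothetical caller): assert that an object is
 * exclusively locked before modifying it.
 *
 *	if (lockstatus(&vn_lock) != LK_EXCLUSIVE)
 *		panic("object not exclusively locked");
 *
 * Note that the answer is only a snapshot; the interlock is dropped
 * before returning.
 */
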
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * granted shared locks and shared-to-exclusive upgrades to clear.
 */
int
lockmgr(lkp, flags, p)
	volatile struct lock *lkp;
	u_int flags;
	struct proc *p;
{
	int error;
	pid_t pid;
	volatile int extflags;

	error = 0;
	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	if (lkp->lk_flags & LK_DRAINED)
		panic("lockmgr: using decommissioned lock");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process has already requested an upgrade,
		 * fail rather than allow an intervening exclusive
		 * access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock is
		 * always released: the caller no longer holds it.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			break;
		}
		/*
		 * Someone else has already requested an upgrade. Our shared
		 * lock was released above; wake the upgrade requestor if we
		 * were the last shared holder, then fall through to request
		 * an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
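		/*
		 * Release one instance of the lock: drop one level of
		 * recursion on an exclusive lock (giving it up entirely
		 * when the count reaches zero), otherwise drop one shared
		 * reference, then wake anyone sleeping in ACQUIRE.
		 */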
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
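		/*
		 * Wait for all activity on the lock to end, then mark it
		 * decommissioned with LK_DRAINED so any later use panics.
		 * Unlike ACQUIRE, this waits by sleeping on &lk_flags and
		 * is awakened by the LK_WAITDRAIN check at the bottom of
		 * this function.
		 */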
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			atomic_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			atomic_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINED;
		break;

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
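	/*
	 * If a drainer is waiting and all activity on the lock has now
	 * ceased, wake it up (it sleeps on &lk_flags rather than on the
	 * lock itself).
	 */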
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	atomic_unlock(&lkp->lk_interlock);
	return (error);
}
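
#ifdef notdef
/*
 * Usage sketch (hypothetical caller, not part of the lock implementation):
 * take a shared lock, try to upgrade it, and release it.  The lock
 * variable, sleep priority, wait message, and function names are
 * illustrative assumptions only.
 */
struct lock obj_lock;

void
lock_example_init()
{

	lock_init(&obj_lock, PRIBIO, "objlock", 0, 0);
}

void
lock_example(p)
	struct proc *p;
{

	if (lockmgr(&obj_lock, LK_SHARED, p))
		return;
	if (lockmgr(&obj_lock, LK_UPGRADE, p) == 0) {
		/* Now held exclusively; modify the protected object. */
		lockmgr(&obj_lock, LK_RELEASE, p);
		return;
	}
	/* On upgrade failure the shared lock has already been released. */
}
#endif /* notdef */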