xref: /csrg-svn/sys/kern/kern_lock.c (revision 68780)
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.4 (Berkeley) 04/11/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be expanded inline below, but we cannot have a #if
 * inside a multi-line define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor, as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */
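
/*
 * Illustrative note, not part of the original sources: on a
 * multiprocessor, PAUSE drops the interlock, spins for up to
 * lock_wait_time iterations waiting for the "wanted" condition to
 * clear, and then retakes the interlock; setting lock_wait_time to 0
 * disables the spin.  Because the macro ends in a bare "break", it
 * (and ACQUIRE below, which invokes it) can only be expanded where a
 * break is legal, e.g. inside the lockmgr() switch below:
 *
 *	PAUSE(lkp, lkp->lk_flags & LK_HAVE_EXCL);
 */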

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		atomic_lock(&(lkp)->lk_interlock);			\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
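
/*
 * Illustrative sketch, not part of the original sources: ACQUIRE is
 * expanded with the interlock held; "error" receives the result and
 * "wanted" is the condition to wait out, e.g.
 *
 *	ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0);
 *
 * When the macro runs to completion, error is 0 if the condition
 * cleared, the tsleep() error if the sleep was interrupted, or ENOLCK
 * if LK_SLEEPFAIL was given and the process had to sleep at all.
 */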

/*
 * Initialize a lock; required before use.
 */
void lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
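
/*
 * Illustrative sketch, not part of the original sources: a lock is
 * typically set up once before first use.  The PINOD priority, the
 * "lockmgr" wait message, and the zero timeout below are placeholder
 * values chosen by the caller, not values required by this code.
 *
 *	struct lock lk;
 *
 *	lock_init(&lk, PINOD, "lockmgr", 0, 0);
 */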

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	atomic_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	atomic_unlock(&lkp->lk_interlock);
	return (lock_type);
}
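
/*
 * Illustrative sketch, not part of the original sources: lockstatus()
 * reports only whether the lock is currently held exclusively, held
 * shared, or not held at all; it does not say which process holds it.
 *
 *	if (lockstatus(&lk) != LK_EXCLUSIVE)
 *		panic("lock not held exclusively");
 */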

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, p, flags)
	volatile struct lock *lkp;
	struct proc *p;
	u_int flags;
{
	int error;
	pid_t pid;
	volatile int extflags;

	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us in requesting an
		 * upgrade, then we want to fail rather than have an
		 * intervening exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has already requested the upgrade. Our shared
		 * lock has been released above; awaken the upgrade requestor
		 * if we were the last shared lock, then request an exclusive
		 * lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
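
/*
 * Illustrative sketch, not part of the original sources: a typical
 * caller takes the lock shared, may later upgrade to exclusive, and
 * finally releases it.  "lk" and "p" are assumed to be supplied by the
 * caller; error handling is abbreviated.
 *
 *	int error;
 *
 *	if ((error = lockmgr(&lk, p, LK_SHARED)) != 0)
 *		return (error);
 *	...
 *	error = lockmgr(&lk, p, LK_UPGRADE);
 *	...
 *	lockmgr(&lk, p, LK_RELEASE);
 *
 * If the upgrade fails, the shared hold has already been released (see
 * the LK_UPGRADE comment above), so the caller must reacquire the lock
 * before touching the protected data again.
 */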