/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.3 (Berkeley) 04/11/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first. The wanted
 * condition is evaluated without the interlock held while spinning and
 * is re-checked under the interlock by ACQUIRE below. Setting
 * lock_wait_time to zero disables spinning entirely.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&(lkp)->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&(lkp)->lk_interlock);		\
		}

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		atomic_lock(&(lkp)->lk_interlock);			\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
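
/*
 * The sleep/wakeup handshake: ACQUIRE sets LK_WAITING under the
 * interlock before sleeping, and whoever clears the wanted condition
 * (a release, a downgrade, or the last shared holder leaving during an
 * upgrade) clears LK_WAITING and issues a wakeup() on the lock address.
 */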

/*
 * Initialize a lock; required before use.
 */
void lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
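
/*
 * Example (illustrative only): a file system that embeds a struct lock
 * in its per-inode data might initialize it as
 *
 *	lock_init(&ip->i_lock, PINOD, "inode", 0, 0);
 *
 * that is, sleep at inode priority with no timeout and no external flags.
 */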

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
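/*
 * Typical call sequences, illustratively (lk is a lock initialized with
 * lock_init() above):
 *
 *	lockmgr(&lk, p, LK_SHARED);		read access
 *	lockmgr(&lk, p, LK_RELEASE);
 *
 *	lockmgr(&lk, p, LK_EXCLUSIVE);		modify
 *	lockmgr(&lk, p, LK_DOWNGRADE);		continue reading
 *	lockmgr(&lk, p, LK_RELEASE);
 *
 * Callers that cannot sleep pass LK_NOWAIT and must be prepared for an
 * EBUSY return.
 */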
int lockmgr(lkp, p, flags)
	volatile struct lock *lkp;
	struct proc *p;
	u_int flags;
{
	int error;
	pid_t pid;
	volatile int extflags;

	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
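		/*
		 * Turn every exclusive reference (there may be more than
		 * one if the lock was acquired with LK_CANRECURSE) into a
		 * shared reference.
		 */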
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us requesting an
		 * upgrade, fail rather than allow it an intervening
		 * exclusive access. Our shared lock is released; if it
		 * was the last one, awaken the waiting upgrader.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0 &&
			    (lkp->lk_flags & LK_WAITING)) {
				lkp->lk_flags &= ~LK_WAITING;
				wakeup((void *)lkp);
			}
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the shared
		 * lock is always released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block;
		 * we block if an upgrade is already pending or if any
		 * other shared holder remains.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 0)) {
			if (lkp->lk_sharecount == 0 &&
			    (lkp->lk_flags & LK_WAITING)) {
				lkp->lk_flags &= ~LK_WAITING;
				wakeup((void *)lkp);
			}
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested an upgrade. Release our
		 * shared lock, awaken the upgrade requestor if we are
		 * the last shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock; allowed only if LK_CANRECURSE
			 * was specified at initialization or with this
			 * request.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
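		/*
		 * The caller is trusted here: there is no check that the
		 * releasing process actually holds the shared or exclusive
		 * lock that it is giving up.
		 */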
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
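
/*
 * The upgrade protocol, illustratively: a process holding a shared lock
 * that needs exclusive access can try
 *
 *	error = lockmgr(&lk, p, LK_UPGRADE);
 *
 * If another upgrade was already pending, the shared lock is released and
 * an ordinary exclusive lock is requested instead; with LK_EXCLUPGRADE the
 * call fails with EBUSY (again releasing the shared lock) rather than wait
 * behind the earlier upgrader.
 */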