/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.1 (Berkeley) 04/09/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.  The spin is
 * only an optimization: the for loop in ACQUIRE retests `wanted'
 * after the spin, so PAUSE must not end in a bare `break' (that
 * would bind to the enclosing switch in lockmgr(), not to a loop).
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&(lkp)->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&(lkp)->lk_interlock);		\
		}

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep(lkp, (lkp)->lk_prio, (lkp)->lk_wmesg,	\
		    (lkp)->lk_timo);					\
		atomic_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_flags |= LK_SLEPT;				\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

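/*
 * Canonical ACQUIRE pattern (illustrative sketch only; `wanted' stands
 * for whatever condition the caller must wait out).  The interlock is
 * held on entry and on exit, and `error' must be checked before the
 * lock state is touched:
 *
 *	atomic_lock(&lkp->lk_interlock);
 *	ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0);
 *	if (error) {
 *		atomic_unlock(&lkp->lk_interlock);
 *		return (error);
 *	}
 *	... update lock state ...
 *	atomic_unlock(&lkp->lk_interlock);
 */
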
/*
 * Initialize a lock; required before use.
 */
void lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}

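/*
 * Initialization sketch (illustrative only; the structure, field, and
 * wait-message names are assumptions, not part of this interface): a
 * lock embedded in some object, sleeping at inode priority with no
 * timeout and no external flags:
 *
 *	lock_init(&sc->sc_lock, PINOD, "sclock", 0, 0);
 */
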
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int lockmgr(lkp, p, flags)
	struct lock *lkp;
	struct proc *p;
	int flags;
{
	pid_t pid;
	int error, extflags;

	pid = p->p_pid;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	lkp->lk_flags &= ~LK_SLEPT;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		atomic_unlock(&lkp->lk_interlock);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).
		 */
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		lkp->lk_sharecount--;
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested an upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		atomic_lock(&lkp->lk_interlock);
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup(lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
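
#ifdef notdef
/*
 * Usage sketch (not compiled into the kernel): how a client might
 * drive these primitives.  The PINOD priority, the "lkdemo" wait
 * message, and this function itself are illustrative assumptions,
 * not part of the lock interface.
 */
void
lock_example(p)
	struct proc *p;
{
	struct lock lk;
	int error;

	/* Sleep at inode priority, no timeout, no external flags. */
	lock_init(&lk, PINOD, "lkdemo", 0, 0);

	/* Take a shared lock; sleeps while an exclusive lock is held. */
	if ((error = lockmgr(&lk, p, LK_SHARED)) != 0)
		return;

	/*
	 * Upgrade to exclusive.  On failure the shared hold has already
	 * been surrendered, so release only on success.
	 */
	if (lockmgr(&lk, p, LK_UPGRADE) == 0)
		(void) lockmgr(&lk, p, LK_RELEASE);
}
#endif /* notdef */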