/*	$NetBSD: kern_condvar.c,v 1.25 2008/06/16 12:03:01 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel condition variable implementation.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.25 2008/06/16 12:03:01 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	CV_SLEEPQ(cv)	((sleepq_t *)(cv)->cv_opaque)
#define	CV_DEBUG_P(cv)	((cv)->cv_wmesg != nodebug)
#define	CV_RA		((uintptr_t)__builtin_return_address(0))

static u_int	cv_unsleep(lwp_t *, bool);
static void	cv_wakeup_one(kcondvar_t *);
static void	cv_wakeup_all(kcondvar_t *);

static syncobj_t cv_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	cv_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

lockops_t cv_lockops = {
	"Condition variable",
	LOCKOPS_CV,
	NULL
};

static const char deadcv[] = "deadcv";
static const char nodebug[] = "nodebug";

/*
 * cv_init:
 *
 *	Initialize a condition variable for use.
 */
void
cv_init(kcondvar_t *cv, const char *wmesg)
{
#ifdef LOCKDEBUG
	bool dodebug;

	dodebug = LOCKDEBUG_ALLOC(cv, &cv_lockops,
	    (uintptr_t)__builtin_return_address(0));
	if (!dodebug) {
		/* XXX This will break vfs_lockf. */
		wmesg = nodebug;
	}
#endif
	KASSERT(wmesg != NULL);
	cv->cv_wmesg = wmesg;
	sleepq_init(CV_SLEEPQ(cv));
}

/*
 * cv_destroy:
 *
 *	Tear down a condition variable.
 */
void
cv_destroy(kcondvar_t *cv)
{

	LOCKDEBUG_FREE(CV_DEBUG_P(cv), cv);
#ifdef DIAGNOSTIC
	KASSERT(cv_is_valid(cv));
	cv->cv_wmesg = deadcv;
#endif
}
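
/*
 * Example (illustrative sketch only; the "example_softc" names below are
 * hypothetical and not part of this file): a condition variable is paired
 * with an interlocking mutex for its whole lifetime, typically initialized
 * at attach time and torn down at detach time.
 *
 *	struct example_softc {
 *		kmutex_t	sc_lock;
 *		kcondvar_t	sc_cv;
 *		int		sc_count;
 *	} sc;
 *
 *	mutex_init(&sc.sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	cv_init(&sc.sc_cv, "example");
 *	...
 *	cv_destroy(&sc.sc_cv);
 *	mutex_destroy(&sc.sc_lock);
 */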

/*
 * cv_enter:
 *
 *	Look up and lock the sleep queue corresponding to the given
 *	condition variable, and increment the number of waiters.
 */
static inline void
cv_enter(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l)
{
	sleepq_t *sq;
	kmutex_t *mp;

	KASSERT(cv_is_valid(cv));
	KASSERT(!cpu_intr_p());
	KASSERT((l->l_pflag & LP_INTR) == 0 || panicstr != NULL);

	LOCKDEBUG_LOCKED(CV_DEBUG_P(cv), cv, mtx, CV_RA, 0);

	l->l_kpriority = true;
	mp = sleepq_hashlock(cv);
	sq = CV_SLEEPQ(cv);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, cv, cv->cv_wmesg, &cv_syncobj);
	mutex_exit(mtx);
	KASSERT(cv_has_waiters(cv));
}

/*
 * cv_exit:
 *
 *	After resuming execution, check to see if we have been restarted
 *	as a result of cv_signal().  If we have, but cannot take the
 *	wakeup (because of, e.g., a pending Unix signal or timeout) then
 *	try to ensure that another LWP sees it.  This is necessary because
 *	there may be multiple waiters, and at least one should take the
 *	wakeup if possible.
 */
static inline int
cv_exit(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l, const int error)
{

	mutex_enter(mtx);
	if (__predict_false(error != 0))
		cv_signal(cv);

	LOCKDEBUG_UNLOCKED(CV_DEBUG_P(cv), cv, CV_RA, 0);
	KASSERT(cv_is_valid(cv));

	return error;
}

/*
 * cv_unsleep:
 *
 *	Remove an LWP from the condition variable and sleep queue.  This
 *	is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, when a signal is received.  Must be
 *	called with the LWP locked, and must return it unlocked.
 */
static u_int
cv_unsleep(lwp_t *l, bool cleanup)
{
	kcondvar_t *cv;

	cv = (kcondvar_t *)(uintptr_t)l->l_wchan;

	KASSERT(l->l_wchan == (wchan_t)cv);
	KASSERT(l->l_sleepq == CV_SLEEPQ(cv));
	KASSERT(cv_is_valid(cv));
	KASSERT(cv_has_waiters(cv));

	return sleepq_unsleep(l, cleanup);
}

/*
 * cv_wait:
 *
 *	Wait non-interruptibly on a condition variable until awoken.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
	lwp_t *l = curlwp;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	(void)sleepq_block(0, false);
	(void)cv_exit(cv, mtx, l, 0);
}
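
/*
 * Example (illustrative sketch only; "sc" and its members are hypothetical):
 * callers re-check the predicate in a loop around cv_wait(), since the
 * condition may already have been consumed by another LWP by the time this
 * one runs again.  The interlock is held across the check and is re-acquired
 * by cv_wait() before it returns.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_count == 0)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	sc->sc_count--;
 *	mutex_exit(&sc->sc_lock);
 */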

/*
 * cv_wait_sig:
 *
 *	Wait on a condition variable until awoken or a signal is received.
 *	Will also return early if the process is exiting.  Returns zero if
 *	awoken normally, ERESTART if a signal was received and the system
 *	call is restartable, or EINTR otherwise.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{
	lwp_t *l = curlwp;
	int error;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	error = sleepq_block(0, true);
	return cv_exit(cv, mtx, l, error);
}
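
/*
 * Example (illustrative sketch only; "sc", its members and "error" are
 * hypothetical): the interruptible variant returns a non-zero error when
 * interrupted, which callers typically propagate so the system call can
 * be restarted (ERESTART) or fail with EINTR.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_count == 0) {
 *		error = cv_wait_sig(&sc->sc_cv, &sc->sc_lock);
 *		if (error != 0) {
 *			mutex_exit(&sc->sc_lock);
 *			return error;
 *		}
 *	}
 *	sc->sc_count--;
 *	mutex_exit(&sc->sc_lock);
 */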

/*
 * cv_timedwait:
 *
 *	Wait on a condition variable until awoken or the specified timeout
 *	expires.  Returns zero if awoken normally or EWOULDBLOCK if the
 *	timeout expired.
 */
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
{
	lwp_t *l = curlwp;
	int error;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	error = sleepq_block(timo, false);
	return cv_exit(cv, mtx, l, error);
}
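
/*
 * Example (illustrative sketch only; "sc", its members and "error" are
 * hypothetical): the timeout is expressed in ticks, so an interval in
 * seconds can be scaled by hz.  EWOULDBLOCK indicates that the timeout
 * expired before the condition was signalled; the predicate should still
 * be re-checked afterwards.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_count == 0) {
 *		error = cv_timedwait(&sc->sc_cv, &sc->sc_lock, 5 * hz);
 *		if (error == EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */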

/*
 * cv_timedwait_sig:
 *
 *	Wait on a condition variable until awoken, a signal is received,
 *	or the specified timeout expires.  Will also return early if the
 *	process is exiting.  Returns zero if awoken normally, EWOULDBLOCK
 *	if the timeout expires, ERESTART if a signal was received and the
 *	system call is restartable, or EINTR otherwise.
 */
int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
{
	lwp_t *l = curlwp;
	int error;

	KASSERT(mutex_owned(mtx));

	cv_enter(cv, mtx, l);
	error = sleepq_block(timo, true);
	return cv_exit(cv, mtx, l, error);
}

/*
 * cv_signal:
 *
 *	Wake the highest priority LWP waiting on a condition variable.
 *	Must be called with the interlocking mutex held.
 */
void
cv_signal(kcondvar_t *cv)
{

	/* LOCKDEBUG_WAKEUP(CV_DEBUG_P(cv), cv, CV_RA); */
	KASSERT(cv_is_valid(cv));

	if (__predict_false(!TAILQ_EMPTY(CV_SLEEPQ(cv))))
		cv_wakeup_one(cv);
}
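
/*
 * Example (illustrative sketch only; "sc" and its members are hypothetical):
 * the producer side changes the state protected by the interlock and then
 * signals, waking at most one waiter to consume the new state.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_count++;
 *	cv_signal(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */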

/*
 * cv_wakeup_one:
 *
 *	Slow path for cv_signal(): wake the first (highest priority) LWP
 *	on the sleep queue, if any, and kick the swapper if it needs to
 *	be swapped in.
 */
static void __noinline
cv_wakeup_one(kcondvar_t *cv)
{
	sleepq_t *sq;
	kmutex_t *mp;
	int swapin;
	lwp_t *l;

	KASSERT(cv_is_valid(cv));

	mp = sleepq_hashlock(cv);
	sq = CV_SLEEPQ(cv);
	l = TAILQ_FIRST(sq);
	if (l == NULL) {
		mutex_spin_exit(mp);
		return;
	}
	KASSERT(l->l_sleepq == sq);
	KASSERT(l->l_mutex == mp);
	KASSERT(l->l_wchan == cv);
	swapin = sleepq_remove(sq, l);
	mutex_spin_exit(mp);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();

	KASSERT(cv_is_valid(cv));
}

/*
 * cv_broadcast:
 *
 *	Wake all LWPs waiting on a condition variable.  Must be called
 *	with the interlocking mutex held.
 */
void
cv_broadcast(kcondvar_t *cv)
{

	/* LOCKDEBUG_WAKEUP(CV_DEBUG_P(cv), cv, CV_RA); */
	KASSERT(cv_is_valid(cv));

	if (__predict_false(!TAILQ_EMPTY(CV_SLEEPQ(cv))))
		cv_wakeup_all(cv);
}
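
/*
 * Example (illustrative sketch only; "sc" and its members are hypothetical):
 * cv_broadcast() is used when every waiter must re-evaluate the state, for
 * example when tearing down an object that multiple LWPs may be waiting on.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_dying = true;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */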

/*
 * cv_wakeup_all:
 *
 *	Slow path for cv_broadcast(): wake every LWP on the sleep queue,
 *	and kick the swapper if any of them need to be swapped in.
 */
static void __noinline
cv_wakeup_all(kcondvar_t *cv)
{
	sleepq_t *sq;
	kmutex_t *mp;
	int swapin;
	lwp_t *l, *next;

	KASSERT(cv_is_valid(cv));

	mp = sleepq_hashlock(cv);
	swapin = 0;
	sq = CV_SLEEPQ(cv);
	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		KASSERT(l->l_wchan == cv);
		next = TAILQ_NEXT(l, l_sleepchain);
		swapin |= sleepq_remove(sq, l);
	}
	mutex_spin_exit(mp);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();

	KASSERT(cv_is_valid(cv));
}

/*
 * cv_wakeup:
 *
 *	Wake all LWPs waiting on a condition variable.  For cases
 *	where the address may be waited on by mtsleep()/tsleep().
 *	Not a documented call.
 */
void
cv_wakeup(kcondvar_t *cv)
{

	cv_wakeup_all(cv);
	wakeup(cv);
}

/*
 * cv_has_waiters:
 *
 *	For diagnostic assertions: return non-zero if a condition
 *	variable has waiters.
 */
bool
cv_has_waiters(kcondvar_t *cv)
{

	return !TAILQ_EMPTY(CV_SLEEPQ(cv));
}

/*
 * cv_is_valid:
 *
 *	For diagnostic assertions: return non-zero if a condition
 *	variable appears to be valid.  No locks need be held.
 */
bool
cv_is_valid(kcondvar_t *cv)
{

	return cv->cv_wmesg != deadcv && cv->cv_wmesg != NULL;
}
407