/*	$NetBSD: locks.c,v 1.65 2013/07/03 17:10:28 njoly Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.65 2013/07/03 17:10:28 njoly Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar)		\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif
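
/*
 * Illustrative sketch (not part of the build): with LOCKDEBUG compiled
 * in, mutex_enter() below expands roughly to
 *
 *	lockdebug_wantlock(mtx, (uintptr_t)__builtin_return_address(0), 0);
 *	rumpuser_mutex_enter(RUMPMTX(mtx));
 *	lockdebug_locked(mtx, NULL,
 *	    (uintptr_t)__builtin_return_address(0), false);
 *
 * i.e. lockdebug records the caller's return address with every
 * acquire/release event.  Without LOCKDEBUG the macros expand to
 * nothing and only the rumpuser hypercall remains.
 */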

/*
 * We map locks to pthread routines.  The difference between the kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since it keeps us
 * compatible with the kernel ABI: regardless of machine architecture,
 * every kernel lock object is at least the size of a pointer.  The
 * downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
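
/*
 * Sketch of the resulting layout (illustrative): the first
 * sizeof(void *) bytes of the kernel's kmutex_t hold a pointer to the
 * heap object allocated by the hypervisor, and RUMPMTX() simply reads
 * that pointer-sized slot:
 *
 *	kmutex_t (kernel ABI)            heap, allocated by rumpuser
 *	+----------------------+         +---------------------------+
 *	| struct rumpuser_mtx *|  ---->  | e.g. pthread mutex, owner |
 *	+----------------------+         +---------------------------+
 *
 * krwlock_t and kcondvar_t below use the same scheme via RUMPRW()
 * and RUMPCV().
 */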

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}
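
/*
 * Examples (illustrative): mutex_init(&m, MUTEX_DEFAULT, IPL_NONE)
 * yields an adaptive mutex, while mutex_init(&m, MUTEX_DEFAULT, IPL_VM)
 * or any MUTEX_SPIN initialization yields a "spin" mutex, i.e. one for
 * which the enter hypercall does not schedule the rump kernel CPU away
 * from the caller.
 */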

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
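/*
 * Unlocking is identical for spin and adaptive mutexes in a rump
 * kernel, so mutex_spin_exit() can simply be a strong alias of
 * mutex_exit().
 */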
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
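	/*
	 * The kernel rw_exit() interface does not tell us whether a
	 * read or a write hold is being released, so for lockdebug
	 * bookkeeping we ask the hypervisor how the lock is held.
	 */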
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* condition variables */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
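
/*
 * kcondvar_t stores its rumpuser condition variable with the same
 * pointer-in-object scheme as the mutex and rwlock above.  Note that
 * the wait channel message passed to cv_init() is not used here.
 */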

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

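/*
 * Common wait path.  Called with mtx held; returns with it held.
 * Returns 0 on wakeup, EWOULDBLOCK if the timeout in ts expired, and
 * EINTR if the calling lwp is being torn down (LW_RUMP_QEXIT), in
 * which case the condition was not necessarily satisfied.
 */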
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * Yield here: someone else might want the CPU in
		 * order to set the condition we are waiting for.
		 * Otherwise we would just loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and drop the "reference" to l->l_private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
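
/*
 * Worked example of the tick conversion above (illustrative): with
 * hz = 100 and ticks = 150, ts becomes 1 second (150 / 100) plus
 * 500000000 nanoseconds (50 * 10000000), i.e. 1.5 seconds.  The _sig
 * variant has no signals to handle here, so cv_timedwait_sig() can be
 * a strong alias of cv_timedwait().
 */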

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
455