/*	$NetBSD: locks.c,v 1.54 2011/03/21 16:41:08 pooka Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.54 2011/03/21 16:41:08 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar, try)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b, c)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif
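
/*
 * Note that in the !LOCKDEBUG case the macros above expand to nothing,
 * so each lock operation below compiles down to a bare rumpuser call.
 */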

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that the kernel uses static storage,
 * while rumpuser allocates the lock object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It also makes staying compatible with the
 * kernel ABI easy, since on every machine architecture the kernel
 * lock objects are at least the size of a pointer.  The downside,
 * of course, is a performance penalty.
 */
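
/*
 * Concretely, the first word of each kernel lock object stores a
 * pointer to the corresponding heap-allocated rumpuser object, and the
 * RUMPMTX/RUMPRW/RUMPCV accessor macros simply reinterpret that word:
 *
 *	kmutex_t km;		kernel object, opaque static storage
 *	RUMPMTX(&km)		the struct rumpuser_mtx * hiding inside
 */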

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

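/*
 * Note: the type and ipl arguments are ignored; every rump kernel
 * mutex is a sleep mutex backed by the host threading library.
 */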
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)mtx);
	ALLOCK(mtx, &mutex_lockops);
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
__strong_alias(mutex_spin_enter,mutex_enter);
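
/*
 * There is no true spinning in a rump kernel, so the spin mutex
 * entry points (here and at mutex_spin_exit below) are simply
 * strong-aliased to the sleeping implementation.
 */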
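
/*
 * Returns non-zero iff the mutex was acquired; lockdebug is told
 * about the lock only on success, since a failed try never blocks.
 */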
int
mutex_tryenter(kmutex_t *mtx)
{
	int rv;

	rv = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (rv) {
		WANTLOCK(mtx, false, true);
		LOCKED(mtx, false);
	}
	return rv;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

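/*
 * Ownership is delegated to the host: a mutex is "owned" iff the
 * current lwp is the one recorded by the underlying rumpuser mutex.
 */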
int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{

	return rumpuser_mutex_owner(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

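/*
 * The rumpuser rw interface takes a boolean "do we want the write
 * lock" argument, so the kernel op is translated at each call site:
 * RW_WRITER requests an exclusive hold, RW_READER a shared one.
 */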
void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER, false);
	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int rv;

	rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
	if (rv) {
		WANTLOCK(rw, op == RW_READER, true);
		LOCKED(rw, op == RW_READER);
	}
	return rv;
}

void
rw_exit(krwlock_t *rw)
{

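	/*
	 * The exit interface doesn't say whether we are dropping a
	 * shared or an exclusive hold, so for lockdebug we must query
	 * the host rwlock to find out which one we are releasing.
	 */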
#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails; the underlying pthread rwlock offers no atomic upgrade */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

void
rw_downgrade(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	KASSERT(rw_write_held(rw));
#endif
	/*
	 * XXX HACK: there is no way to downgrade an rw lock through
	 * rumpuser, so fake it by dropping the write lock and
	 * reacquiring it as a reader.  Note that this is not atomic:
	 * another writer may slip in between the two operations.
	 */
	rw_exit(rw);
	rw_enter(rw, RW_READER);
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* condition variables, or "curricula vitae" */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
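
/*
 * Note: the wmesg argument is ignored; rumpuser condition variables
 * are anonymous.
 */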
void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

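/*
 * Common body for the cv_wait() family.  The cv is published in
 * l->l_private while we sleep so that the exit code can find and
 * wake us.  If the lwp has been marked for exit (LW_RUMP_QEXIT),
 * we must not go back to sleep on the caller's cv; instead we wait
 * on p->p_waitcv until the process signals, via PS_RUMP_LWPEXIT,
 * that lwps are allowed to exit, and then return EINTR.
 */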
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here: someone else might want the cpu to
		 * set a condition.  Otherwise we'd just loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If it is set, we need to wait here until
	 * we are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove the "reference" to l->l_private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;
	int rv;

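	/* a timeout of zero ticks means no timeout: wait indefinitely */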
	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		/*
		 * XXX: this fetches rump kernel time, but
		 * rumpuser_cv_timedwait uses host time.
		 */
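		/*
		 * Convert the relative tick count into an absolute
		 * wakeup time: whole seconds plus the remainder at
		 * (1000000000 / hz) nanoseconds per tick.
		 */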
		nanotime(&ts);
		tick.tv_sec = ticks / hz;
		tick.tv_nsec = (ticks % hz) * (1000000000/hz);
		timespecadd(&ts, &tick, &ts);

		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of a validity check, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}