/*	$NetBSD: locks.c,v 1.39 2010/04/14 10:34:54 pooka Exp $	*/

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.39 2010/04/14 10:34:54 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It also keeps us compatible with the
 * kernel ABI, since kernel lock objects, regardless of machine
 * architecture, are always at least the size of a pointer.  The
 * downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
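
/*
 * In effect, the first pointer-sized slot of the kernel's static
 * kmutex_t storage is reused to hold a pointer to the heap-allocated
 * rumpuser lock.  A sketch of what the RUMPMTX() dereference amounts
 * to (illustration only, not an interface of this file):
 *
 *	kmutex_t km;			// static kernel-side storage
 *	// ... after mutex_init(&km, ...):
 *	struct rumpuser_mtx *rmtx = RUMPMTX(&km);
 */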

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}
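
/*
 * Note: this implementation ignores the type and ipl arguments; they
 * are accepted only to keep the standard interface.  Usage is thus
 * the same as in a regular kernel (a sketch with the usual values):
 *
 *	kmutex_t mtx;
 *
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&mtx);
 *	// ... critical section ...
 *	mutex_exit(&mtx);
 *	mutex_destroy(&mtx);
 */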

void
mutex_destroy(kmutex_t *mtx)
{

	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	rumpuser_mutex_enter(RUMPMTX(mtx));
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

	return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	mutex_exit(mtx);
}
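
/*
 * The spin variants simply alias the adaptive ones: the rump kernel
 * runs in a host process where blocking is always permissible, so
 * there is nothing to be gained from actually spinning with
 * interrupts blocked.
 */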

int
mutex_owned(kmutex_t *mtx)
{

	return rumpuser_mutex_held(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

	return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}
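
/*
 * rw_tryupgrade() is permitted to fail, and since the underlying
 * pthread rwlock offers no upgrade primitive, this version always
 * does.  Callers therefore need the usual fallback (a sketch):
 *
 *	if (!rw_tryupgrade(&rwl)) {
 *		rw_exit(&rwl);
 *		rw_enter(&rwl, RW_WRITER);
 *		// state may have changed while the lock was dropped;
 *		// revalidate before continuing
 *	}
 */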

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* condition variables, a.k.a. "curriculum vitaes" */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (rump_threads == 0)
		panic("cv_wait without threads");
	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}

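/*
 * Signals are not delivered to a rump kernel, so the _sig variants
 * cannot actually be interrupted: they wait like their plain
 * counterparts and always report success.
 */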
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;

	/*
	 * XXX: this fetches rump kernel time, but rumpuser_cv_timedwait
	 * uses host time.
	 */
	nanotime(&ts);
	tick.tv_sec = ticks / hz;
	tick.tv_nsec = (ticks % hz) * (1000000000/hz);
	timespecadd(&ts, &tick, &ts);

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts.tv_sec, ts.tv_nsec))
			return EWOULDBLOCK;
		else
			return 0;
	}
}
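
/*
 * Worked example of the tick-to-timespec conversion above: with
 * hz = 100 and ticks = 150, tick.tv_sec = 150 / 100 = 1 and
 * tick.tv_nsec = (150 % 100) * (1000000000 / 100) = 500000000,
 * i.e. a relative timeout of 1.5 seconds added to the current time.
 * A ticks value of 0 means "wait forever", as in the regular kernel.
 */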

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}

/*
 * giant lock
 */

static volatile int lockcnt;

bool
kernel_biglocked(void)
{

	return rumpuser_mutex_held(rump_giantlock) && lockcnt > 0;
}

void
kernel_unlock_allbutone(int *countp)
{
	int minusone = lockcnt - 1;

	KASSERT(kernel_biglocked());
	if (minusone) {
		_kernel_unlock(minusone, countp);
	}
	KASSERT(lockcnt == 1);
	*countp = minusone;

	/*
	 * We drop lockcnt to 0 since rumpuser doesn't know that the
	 * kernel biglock is being used as the interlock for cv in
	 * tsleep.
	 */
	lockcnt = 0;
}
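
/*
 * kernel_unlock_allbutone() and kernel_ununlock_allbutone() are a
 * matched pair around a rumpuser condition variable wait that uses
 * the biglock as its interlock.  Roughly (a sketch of the intended
 * calling pattern, not code from this file):
 *
 *	int nlocks;
 *
 *	kernel_unlock_allbutone(&nlocks);
 *	// ... wait on a cv with the biglock as interlock ...
 *	kernel_ununlock_allbutone(nlocks);
 */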

void
kernel_ununlock_allbutone(int nlocks)
{

	KASSERT(rumpuser_mutex_held(rump_giantlock) && lockcnt == 0);
	lockcnt = 1;
	_kernel_lock(nlocks);
}

void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		if (!rumpuser_mutex_tryenter(rump_giantlock)) {
			struct lwp *l = curlwp;

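			/*
			 * The lock is contended: give up our virtual
			 * CPU before blocking on the host mutex, so
			 * that other threads can run in the rump
			 * kernel in the meantime, and take a CPU
			 * again once we hold the biglock.
			 */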
			rump_unschedule_cpu1(l);
			rumpuser_mutex_enter_nowrap(rump_giantlock);
			rump_schedule_cpu(l);
		}
		lockcnt++;
	}
}

void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

void
rump_user_unschedule(int nlocks, int *countp)
{

	_kernel_unlock(nlocks, countp);
	/*
	 * XXX: technically we should unschedule_cpu1() here, but that
	 * requires rump_intr_enter/exit to be implemented.
	 */
	rump_unschedule_cpu(curlwp);
}

void
rump_user_schedule(int nlocks)
{

	rump_schedule_cpu(curlwp);

	if (nlocks)
		_kernel_lock(nlocks);
}
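
/*
 * rump_user_unschedule() and rump_user_schedule() bracket sections
 * where a thread leaves the rump kernel, e.g. to perform a blocking
 * call on the host.  A sketch of the pattern (blocking_host_call is
 * a hypothetical stand-in for an actual host operation):
 *
 *	int nlocks;
 *
 *	rump_user_unschedule(0, &nlocks);
 *	blocking_host_call();
 *	rump_user_schedule(nlocks);
 */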
390