/*	$NetBSD: locks.c,v 1.80 2018/02/05 05:00:48 ozaki-r Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.80 2018/02/05 05:00:48 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_spin_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = NULL,
};
static lockops_t mutex_adaptive_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};
static lockops_t rw_lockops = {
	.lo_name = "rwlock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

#define ALLOCK(lock, ops, return_address)		\
	lockdebug_alloc(__func__, __LINE__, lock, ops,	\
	    return_address)
#define FREELOCK(lock)					\
	lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)				\
	lockdebug_wantlock(__func__, __LINE__, lock,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)				\
	lockdebug_locked(__func__, __LINE__, lock, NULL,\
	    (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)				\
	lockdebug_unlocked(__func__, __LINE__, lock,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)				\
	lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b, c)	do {} while (0)
#define FREELOCK(a)	do {} while (0)
#define WANTLOCK(a, b)	do {} while (0)
#define LOCKED(a, b)	do {} while (0)
#define UNLOCKED(a, b)	do {} while (0)
#define BARRIER(a, b)	do {} while (0)
#endif

/*
 * We map kernel locks onto pthread routines.  The difference between
 * the kernel and rumpuser routines is that while the kernel uses
 * static storage, rumpuser allocates the lock object from the heap.
 * This indirection is necessary because we don't know the size of
 * pthread objects here.  It also keeps us compatible with the kernel
 * ABI: on every machine architecture a kernel lock object is at
 * least the size of a pointer, so the pointer to the heap object
 * always fits.  The downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx *const*)(mtx))

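/*
 * Illustrative sketch, not part of the build: the kmutex_t's storage
 * holds a single pointer to the rumpuser mutex allocated by the
 * hypercall layer, and RUMPMTX() simply reads that pointer back out:
 *
 *	kmutex_t km;
 *
 *	// store the heap pointer into km's storage
 *	rumpuser_mutex_init((struct rumpuser_mtx **)&km,
 *	    RUMPUSER_MTX_KMUTEX);
 *	// every later use dereferences the stored pointer
 *	rumpuser_mutex_enter(RUMPMTX(&km));
 */
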
void _mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t);
void
_mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl, uintptr_t return_address)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Use this simple set of conditionals to decide whether the
	 * caller wanted a spin mutex or an adaptive one.  The
	 * difference in a rump kernel is that the hypervisor does
	 * not relinquish the rump kernel CPU context for a spin
	 * mutex; note that the hypervisor itself may still block
	 * even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	if (isspin)
		ALLOCK(mtx, &mutex_spin_lockops, return_address);
	else
		ALLOCK(mtx, &mutex_adaptive_lockops, return_address);
}
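
/*
 * A hedged usage sketch, not compiled in: with the classification
 * above, IPL_NONE and the soft-interrupt IPLs yield an adaptive
 * mutex, while any other IPL, or an explicit MUTEX_SPIN, yields a
 * spin mutex.
 *
 *	kmutex_t slow, fast;
 *
 *	mutex_init(&slow, MUTEX_DEFAULT, IPL_NONE);	// adaptive
 *	mutex_init(&fast, MUTEX_DEFAULT, IPL_VM);	// spin
 */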

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	_mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0));
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	if (!rumpuser_mutex_spin_p(RUMPMTX(mtx)))
		BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	KASSERT(rumpuser_mutex_spin_p(RUMPMTX(mtx)));
	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}
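
/*
 * Caller-side sketch, illustrative only: mutex_tryenter() returns
 * nonzero on success, so the usual pattern is to take a fallback
 * path when the lock is contended.  "sc_lock" is hypothetical.
 *
 *	if (mutex_tryenter(&sc_lock)) {
 *		// fast path, lock held
 *		mutex_exit(&sc_lock);
 *	} else {
 *		// defer the work or retry later
 *	}
 */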

void
mutex_exit(kmutex_t *mtx)
{

#ifndef LOCKDEBUG
	KASSERT(mutex_owned(mtx));
#endif
	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	WANTLOCK(mtx, -1);
#endif
	return 1;
}

int
mutex_owned(const kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

lwp_t *
mutex_owner(const kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void _rw_init(krwlock_t *, uintptr_t);
void
_rw_init(krwlock_t *rw, uintptr_t return_address)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops, return_address);
}

void
rw_init(krwlock_t *rw)
{

	_rw_init(rw, (uintptr_t)__builtin_return_address(0));
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}
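
/*
 * Caller-side sketch, illustrative only, of the upgrade protocol:
 * rw_tryupgrade() may fail, in which case the caller must drop the
 * read lock, reacquire as a writer and revalidate anything it
 * observed while reading.  "tree_lock" is hypothetical.
 *
 *	rw_enter(&tree_lock, RW_READER);
 *	// ... inspect shared state ...
 *	if (!rw_tryupgrade(&tree_lock)) {
 *		rw_exit(&tree_lock);
 *		rw_enter(&tree_lock, RW_WRITER);
 *		// recheck the state observed under the read lock
 *	}
 *	// ... modify, then ...
 *	rw_exit(&tree_lock);
 */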

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * Yield here: another thread may need the CPU to
		 * set the condition we would wait on.  Without
		 * yielding we would just loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If set, we must wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and drop the "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}
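
/*
 * Canonical caller-side sketch, illustrative only: a wait may wake
 * without the condition holding, so the predicate is always
 * rechecked in a loop with the interlock held.  "sc_lock", "sc_cv"
 * and "ready" are hypothetical.
 *
 *	mutex_enter(&sc_lock);
 *	while (!ready)
 *		cv_wait(&sc_cv, &sc_lock);
 *	mutex_exit(&sc_lock);
 *
 * and on the waking side:
 *
 *	mutex_enter(&sc_lock);
 *	ready = true;
 *	cv_broadcast(&sc_cv);
 *	mutex_exit(&sc_lock);
 */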

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
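
/*
 * Timeout sketch, illustrative only: the timeout is given in ticks,
 * so callers usually convert from wall-clock units with mstohz(9);
 * EWOULDBLOCK means the timeout expired before a wakeup.  "sc_cv",
 * "sc_lock" and timeout_handler() are hypothetical.
 *
 *	error = cv_timedwait(&sc_cv, &sc_lock, mstohz(100));
 *	if (error == EWOULDBLOCK)
 *		timeout_handler();	// no wakeup within ~100ms
 */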

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}