/*	$NetBSD: locks.c,v 1.88 2023/11/02 10:31:55 martin Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.88 2023/11/02 10:31:55 martin Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_spin_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = NULL,
};
static lockops_t mutex_adaptive_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};
static lockops_t rw_lockops = {
	.lo_name = "rwlock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

#define ALLOCK(lock, ops, return_address)		\
	lockdebug_alloc(__func__, __LINE__, lock, ops,	\
	    return_address)
#define FREELOCK(lock)					\
	lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)				\
	lockdebug_wantlock(__func__, __LINE__, lock,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)				\
	lockdebug_locked(__func__, __LINE__, lock, NULL,\
	    (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)				\
	lockdebug_unlocked(__func__, __LINE__, lock,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)				\
	lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b, c)	do {} while (0)
#define FREELOCK(a)	do {} while (0)
#define WANTLOCK(a, b)	do {} while (0)
#define LOCKED(a, b)	do {} while (0)
#define UNLOCKED(a, b)	do {} while (0)
#define BARRIER(a, b)	do {} while (0)
#endif

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * easily stay compatible with the kernel ABI: all kernel lock
 * objects, regardless of machine architecture, are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx *const *)(mtx))
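
/*
 * A minimal usage sketch (not part of this file): callers use the
 * standard kernel mutex interface, which the routines below forward
 * to the rumpuser hypercall layer:
 *
 *	kmutex_t mtx;
 *
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&mtx);
 *	... critical section ...
 *	mutex_exit(&mtx);
 *	mutex_destroy(&mtx);
 */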

void
_mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl,
    uintptr_t return_address)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

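	/*
	 * The caller's kmutex_t storage must have room for one
	 * rumpuser mutex pointer; see the mapping comment above.
	 */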
	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}
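	/*
	 * For example, under the rules above IPL_NONE yields an
	 * adaptive mutex, while e.g. IPL_VM or IPL_SCHED yields a
	 * spin mutex.
	 */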

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	if (isspin)
		ALLOCK(mtx, &mutex_spin_lockops, return_address);
	else
		ALLOCK(mtx, &mutex_adaptive_lockops, return_address);
}

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	_mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0));
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
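	/*
	 * An adaptive mutex may put the caller to sleep, so apply
	 * lockdebug's sleep barrier only for non-spin mutexes.
	 */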
	if (!rumpuser_mutex_spin_p(RUMPMTX(mtx)))
		BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	KASSERT(rumpuser_mutex_spin_p(RUMPMTX(mtx)));
	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

#ifndef LOCKDEBUG
	KASSERT(mutex_owned(mtx));
#endif
	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
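
/*
 * Spin and adaptive mutexes share one representation here, so
 * mutex_spin_exit can be a plain alias for mutex_exit.
 */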
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	WANTLOCK(mtx, -1);
#endif
	return 1;
}

int
mutex_owned(const kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l == curlwp;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */
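
/*
 * Usage sketch (standard kernel idiom, not part of this file):
 *
 *	krwlock_t rw;
 *
 *	rw_init(&rw);
 *	rw_enter(&rw, RW_READER);	(or RW_WRITER)
 *	... access the protected data ...
 *	rw_exit(&rw);
 *	rw_destroy(&rw);
 */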

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
_rw_init(krwlock_t *rw, uintptr_t return_address)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops, return_address);
}

void
rw_init(krwlock_t *rw)
{

	_rw_init(rw, (uintptr_t)__builtin_return_address(0));
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
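	/*
	 * On success, update the lockdebug state: release the shared
	 * hold and record a fresh exclusive one.
	 */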
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
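	/* Lockdebug bookkeeping: swap the exclusive hold for a shared one. */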
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

krw_t
rw_lock_op(krwlock_t *rw)
{

	return rw_write_held(rw) ? RW_WRITER : RW_READER;
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
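
/*
 * Sketch of the usual wait pattern (standard kernel idiom, not part
 * of this file); the mutex must be held across the wait:
 *
 *	mutex_enter(&mtx);
 *	while (!condition)
 *		cv_wait(&cv, &mtx);
 *	mutex_exit(&mtx);
 */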

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

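/*
 * Common backend for the cv_wait*() family: drop mtx, sleep on cv
 * (optionally with a timeout), retake mtx, and handle forced LWP
 * exit (LW_RUMP_QEXIT), which is reported as EINTR.
 */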
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * Yield() here: someone might want the CPU to set a
		 * condition.  Otherwise we'll just loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_sched.info = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If set, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->l_sched.info */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_sched.info = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

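/*
 * In this implementation the _sig variant shares the plain wait
 * path; there is no signal-based interruption here, but EINTR can
 * still be returned when the LWP is being torn down.
 */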
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

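	/*
	 * ticks == 0 means "no timeout": wait indefinitely.
	 * Otherwise convert the tick count to a timespec using hz.
	 */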
	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}