/*	$NetBSD: rumpuser_pth.c,v 1.1 2010/02/26 18:54:20 pooka Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if !defined(lint)
__RCSID("$NetBSD: rumpuser_pth.c,v 1.1 2010/02/26 18:54:20 pooka Exp $");
#endif /* !lint */

#ifdef __linux__
#define _XOPEN_SOURCE 500
#define _BSD_SOURCE
#define _FILE_OFFSET_BITS 64
#endif

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

#include <rump/rumpuser.h>

#include "rumpuser_int.h"

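/* thread-specific data key holding the curlwp pointer for each host thread */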
static pthread_key_t curlwpkey;

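/*
 * NOFAIL() aborts if the given expression evaluates to false (e.g. a
 * failed malloc).  NOFAIL_ERRNO() is for pthread calls which return an
 * error number: any non-zero return is treated as a fatal error.
 */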
#define NOFAIL(a) do {if (!(a)) abort();} while (/*CONSTCOND*/0)
#define NOFAIL_ERRNO(a)						\
do {								\
	int fail_rv = (a);					\
	if (fail_rv) {						\
		printf("panic: rumpuser fatal failure %d (%s)\n",	\
		    fail_rv, strerror(fail_rv));		\
		abort();					\
	}							\
} while (/*CONSTCOND*/0)

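/*
 * Mutex: a pthread mutex plus owner/recursion bookkeeping so that
 * rumpuser_mutex_held() can answer "does the current thread own this?".
 */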
#define RUMTX_INCRECURSION(mtx) ((mtx)->recursion++)
#define RUMTX_DECRECURSION(mtx) ((mtx)->recursion--)
struct rumpuser_mtx {
	pthread_mutex_t pthmtx;
	pthread_t owner;
	unsigned recursion;
};

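/*
 * Read/write lock: a pthread rwlock plus hold-state bookkeeping.
 * readers > 0 means the lock is read-held, readers == -1 means it is
 * write-held by "writer".  The spinlock protects the reader count.
 */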
#define RURW_AMWRITER(rw) (pthread_equal(rw->writer, pthread_self())	\
				&& rw->readers == -1)
#define RURW_HASREAD(rw)  (rw->readers > 0)

#define RURW_SETWRITE(rw)					\
do {								\
	assert(rw->readers == 0);				\
	rw->writer = pthread_self();				\
	rw->readers = -1;					\
} while (/*CONSTCOND*/0)
#define RURW_CLRWRITE(rw)					\
do {								\
	assert(rw->readers == -1 && RURW_AMWRITER(rw));		\
	rw->readers = 0;					\
} while (/*CONSTCOND*/0)
#define RURW_INCREAD(rw)					\
do {								\
	pthread_spin_lock(&rw->spin);				\
	assert(rw->readers >= 0);				\
	++(rw)->readers;					\
	pthread_spin_unlock(&rw->spin);				\
} while (/*CONSTCOND*/0)
#define RURW_DECREAD(rw)					\
do {								\
	pthread_spin_lock(&rw->spin);				\
	assert(rw->readers > 0);				\
	--(rw)->readers;					\
	pthread_spin_unlock(&rw->spin);				\
} while (/*CONSTCOND*/0)

struct rumpuser_rw {
	pthread_rwlock_t pthrw;
	pthread_spinlock_t spin;
	int readers;
	pthread_t writer;
};

struct rumpuser_cv {
	pthread_cond_t pthcv;
	int nwaiters;
};

struct rumpuser_mtx rumpuser_aio_mtx;
struct rumpuser_cv rumpuser_aio_cv;
int rumpuser_aio_head, rumpuser_aio_tail;
struct rumpuser_aio rumpuser_aios[N_AIOS];

kernel_lockfn	rumpuser__klock;
kernel_unlockfn	rumpuser__kunlock;
int		rumpuser__wantthreads;

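/*
 * Async I/O thread: consume requests from the rumpuser_aios ring buffer
 * (indexed by rumpuser_aio_head/tail), perform the I/O with host
 * pread()/pwrite(), and report completion via the biodone callback with
 * the rump kernel lock held.
 */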
void
/*ARGSUSED*/
rumpuser_biothread(void *arg)
{
	struct rumpuser_aio *rua;
	rump_biodone_fn biodone = arg;
	ssize_t rv;
	int error, dummy;

	/* unschedule from CPU.  we reschedule before running the interrupt */
	rumpuser__kunlock(0, &dummy);
	assert(dummy == 0);

	NOFAIL_ERRNO(pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx));
	for (;;) {
		while (rumpuser_aio_head == rumpuser_aio_tail) {
			NOFAIL_ERRNO(pthread_cond_wait(&rumpuser_aio_cv.pthcv,
			    &rumpuser_aio_mtx.pthmtx));
		}

		rua = &rumpuser_aios[rumpuser_aio_tail];
		assert(rua->rua_bp != NULL);
		pthread_mutex_unlock(&rumpuser_aio_mtx.pthmtx);

		if (rua->rua_op & RUA_OP_READ) {
			error = 0;
			rv = pread(rua->rua_fd, rua->rua_data,
			    rua->rua_dlen, rua->rua_off);
			if (rv < 0) {
				rv = 0;
				error = errno;
			}
		} else {
			error = 0;
			rv = pwrite(rua->rua_fd, rua->rua_data,
			    rua->rua_dlen, rua->rua_off);
			if (rv < 0) {
				rv = 0;
				error = errno;
			} else if (rua->rua_op & RUA_OP_SYNC) {
#ifdef __NetBSD__
				fsync_range(rua->rua_fd, FDATASYNC,
				    rua->rua_off, rua->rua_dlen);
#else
				fsync(rua->rua_fd);
#endif
			}
		}
		rumpuser__klock(0);
		biodone(rua->rua_bp, (size_t)rv, error);
		rumpuser__kunlock(0, &dummy);

		rua->rua_bp = NULL;

		NOFAIL_ERRNO(pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx));
		rumpuser_aio_tail = (rumpuser_aio_tail+1) % N_AIOS;
		pthread_cond_signal(&rumpuser_aio_cv.pthcv);
	}

	/*NOTREACHED*/
	fprintf(stderr, "error: rumpuser_biothread reached unreachable\n");
	abort();
}

void
rumpuser_thrinit(kernel_lockfn lockfn, kernel_unlockfn unlockfn, int threads)
{

	pthread_mutex_init(&rumpuser_aio_mtx.pthmtx, NULL);
	pthread_cond_init(&rumpuser_aio_cv.pthcv, NULL);

	pthread_key_create(&curlwpkey, NULL);

	rumpuser__klock = lockfn;
	rumpuser__kunlock = unlockfn;
	rumpuser__wantthreads = threads;
}

#if 0
void
rumpuser__thrdestroy(void)
{

	pthread_key_delete(curlwpkey);
}
#endif

int
rumpuser_thread_create(void *(*f)(void *), void *arg, const char *thrname)
{
	pthread_t ptid;
	int rv;

	rv = pthread_create(&ptid, NULL, f, arg);
#ifdef __NetBSD__
	if (rv == 0 && thrname)
		pthread_setname_np(ptid, thrname, NULL);
#endif

	return rv;
}

__dead void
rumpuser_thread_exit(void)
{

	pthread_exit(NULL);
}

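/*
 * Mutexes.  The default variant uses PTHREAD_MUTEX_ERRORCHECK so that
 * self-deadlock and unlock-by-non-owner are reported as errors; the
 * recursive variant uses PTHREAD_MUTEX_RECURSIVE.
 */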
void
rumpuser_mutex_init(struct rumpuser_mtx **mtx)
{
	pthread_mutexattr_t att;

	NOFAIL(*mtx = malloc(sizeof(struct rumpuser_mtx)));

	pthread_mutexattr_init(&att);
	pthread_mutexattr_settype(&att, PTHREAD_MUTEX_ERRORCHECK);
	NOFAIL_ERRNO(pthread_mutex_init(&((*mtx)->pthmtx), &att));
	pthread_mutexattr_destroy(&att);

	(*mtx)->owner = NULL;
	(*mtx)->recursion = 0;
}

void
rumpuser_mutex_recursive_init(struct rumpuser_mtx **mtx)
{
	pthread_mutexattr_t mattr;

	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);

	NOFAIL(*mtx = malloc(sizeof(struct rumpuser_mtx)));
	NOFAIL_ERRNO(pthread_mutex_init(&((*mtx)->pthmtx), &mattr));
	(*mtx)->owner = NULL;
	(*mtx)->recursion = 0;

	pthread_mutexattr_destroy(&mattr);
}

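/*
 * mtxenter()/mtxexit() maintain the owner and recursion count after a
 * successful pthread lock/unlock so that rumpuser_mutex_held() works.
 */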
static void
mtxenter(struct rumpuser_mtx *mtx)
{

	if (mtx->recursion++ == 0) {
		assert(mtx->owner == NULL);
		mtx->owner = pthread_self();
	} else {
		assert(pthread_equal(mtx->owner, pthread_self()));
	}
}

static void
mtxexit(struct rumpuser_mtx *mtx)
{

	assert(mtx->owner != NULL);
	if (--mtx->recursion == 0)
		mtx->owner = NULL;
}

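/*
 * Lock acquisition pattern: first try the lock without blocking.  Only
 * if that fails do we go through KLOCK_WRAP() (see rumpuser_int.h),
 * which releases the rump kernel lock around the blocking acquire and
 * reacquires it afterwards.
 */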
void
rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
{

	if (pthread_mutex_trylock(&mtx->pthmtx) != 0)
		KLOCK_WRAP(NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx)));
	mtxenter(mtx);
}

void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx));
	mtxenter(mtx);
}

int
rumpuser_mutex_tryenter(struct rumpuser_mtx *mtx)
{
	int rv;

	rv = pthread_mutex_trylock(&mtx->pthmtx);
	if (rv == 0) {
		mtxenter(mtx);
	}

	return rv == 0;
}

void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
}

void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_destroy(&mtx->pthmtx));
	free(mtx);
}

int
rumpuser_mutex_held(struct rumpuser_mtx *mtx)
{

	return mtx->recursion && pthread_equal(mtx->owner, pthread_self());
}

void
rumpuser_rw_init(struct rumpuser_rw **rw)
{

	NOFAIL(*rw = malloc(sizeof(struct rumpuser_rw)));
	NOFAIL_ERRNO(pthread_rwlock_init(&((*rw)->pthrw), NULL));
	NOFAIL_ERRNO(pthread_spin_init(&((*rw)->spin), PTHREAD_PROCESS_SHARED));
	(*rw)->readers = 0;
	(*rw)->writer = NULL;
}

void
rumpuser_rw_enter(struct rumpuser_rw *rw, int iswrite)
{

	if (iswrite) {
		if (pthread_rwlock_trywrlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_wrlock(&rw->pthrw)));
		RURW_SETWRITE(rw);
	} else {
		if (pthread_rwlock_tryrdlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_rdlock(&rw->pthrw)));
		RURW_INCREAD(rw);
	}
}

int
rumpuser_rw_tryenter(struct rumpuser_rw *rw, int iswrite)
{
	int rv;

	if (iswrite) {
		rv = pthread_rwlock_trywrlock(&rw->pthrw);
		if (rv == 0)
			RURW_SETWRITE(rw);
	} else {
		rv = pthread_rwlock_tryrdlock(&rw->pthrw);
		if (rv == 0)
			RURW_INCREAD(rw);
	}

	return rv == 0;
}

void
rumpuser_rw_exit(struct rumpuser_rw *rw)
{

	if (RURW_HASREAD(rw))
		RURW_DECREAD(rw);
	else
		RURW_CLRWRITE(rw);
	NOFAIL_ERRNO(pthread_rwlock_unlock(&rw->pthrw));
}

void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	NOFAIL_ERRNO(pthread_rwlock_destroy(&rw->pthrw));
	NOFAIL_ERRNO(pthread_spin_destroy(&rw->spin));
	free(rw);
}

int
rumpuser_rw_held(struct rumpuser_rw *rw)
{

	return rw->readers != 0;
}

int
rumpuser_rw_rdheld(struct rumpuser_rw *rw)
{

	return RURW_HASREAD(rw);
}

int
rumpuser_rw_wrheld(struct rumpuser_rw *rw)
{

	return RURW_AMWRITER(rw);
}

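/*
 * Condition variables.  The wait routines drop only the owner/recursion
 * bookkeeping (mtxexit/mtxenter) around pthread_cond_wait(), since
 * pthread_cond_wait() itself releases and reacquires the pthread mutex.
 * The wrapped variants also release the rump kernel lock while waiting.
 */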
void
rumpuser_cv_init(struct rumpuser_cv **cv)
{

	NOFAIL(*cv = malloc(sizeof(struct rumpuser_cv)));
	NOFAIL_ERRNO(pthread_cond_init(&((*cv)->pthcv), NULL));
	(*cv)->nwaiters = 0;
}

void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_destroy(&cv->pthcv));
	free(cv);
}

void
rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	assert(mtx->recursion == 1);
	mtxexit(mtx);
	KLOCK_WRAP(NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx)));
	mtxenter(mtx);
	cv->nwaiters--;
}

void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	assert(mtx->recursion == 1);
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	cv->nwaiters--;
}

int
rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
	int64_t sec, int64_t nsec)
{
	struct timespec ts;
	int rv;

	/* LINTED */
	ts.tv_sec = sec; ts.tv_nsec = nsec;

	cv->nwaiters++;
	mtxexit(mtx);
	KLOCK_WRAP(rv = pthread_cond_timedwait(&cv->pthcv, &mtx->pthmtx, &ts));
	mtxenter(mtx);
	cv->nwaiters--;
	if (rv != 0 && rv != ETIMEDOUT)
		abort();

	return rv == ETIMEDOUT;
}

void
rumpuser_cv_signal(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_signal(&cv->pthcv));
}

void
rumpuser_cv_broadcast(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_broadcast(&cv->pthcv));
}

int
rumpuser_cv_has_waiters(struct rumpuser_cv *cv)
{

	return cv->nwaiters;
}

/*
 * curlwp
 */

void
rumpuser_set_curlwp(struct lwp *l)
{

	assert(pthread_getspecific(curlwpkey) == NULL || l == NULL);
	pthread_setspecific(curlwpkey, l);
}

struct lwp *
rumpuser_get_curlwp(void)
{

	return pthread_getspecific(curlwpkey);
}
527