/*	$NetBSD: rumpuser_pth.c,v 1.3 2010/05/31 23:09:30 pooka Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if !defined(lint)
__RCSID("$NetBSD: rumpuser_pth.c,v 1.3 2010/05/31 23:09:30 pooka Exp $");
#endif /* !lint */

#ifdef __linux__
#define _XOPEN_SOURCE 500
#define _BSD_SOURCE
#define _FILE_OFFSET_BITS 64
#endif

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

#include <rump/rumpuser.h>

#include "rumpuser_int.h"

static pthread_key_t curlwpkey;

#define NOFAIL(a) do {if (!(a)) abort();} while (/*CONSTCOND*/0)
#define NOFAIL_ERRNO(a)							\
do {									\
	int fail_rv = (a);						\
	if (fail_rv) {							\
		printf("panic: rumpuser fatal failure %d (%s)\n",	\
		    fail_rv, strerror(fail_rv));			\
		abort();						\
	}								\
} while (/*CONSTCOND*/0)

#define RUMTX_INCRECURSION(mtx) ((mtx)->recursion++)
#define RUMTX_DECRECURSION(mtx) ((mtx)->recursion--)
struct rumpuser_mtx {
	pthread_mutex_t pthmtx;
	pthread_t owner;
	unsigned recursion;
};

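/*
 * Reader/writer bookkeeping convention: rw->readers > 0 means the lock
 * is read-held by that many readers, rw->readers == -1 means it is
 * write-held by rw->writer, and 0 means it is free.  The spinlock only
 * protects the reader count; blocking is done on the pthread rwlock.
 */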
#define RURW_AMWRITER(rw) (pthread_equal(rw->writer, pthread_self())	\
				&& rw->readers == -1)
#define RURW_HASREAD(rw)  (rw->readers > 0)

#define RURW_SETWRITE(rw)						\
do {									\
	assert(rw->readers == 0);					\
	rw->writer = pthread_self();					\
	rw->readers = -1;						\
} while (/*CONSTCOND*/0)
#define RURW_CLRWRITE(rw)						\
do {									\
	assert(rw->readers == -1 && RURW_AMWRITER(rw));			\
	rw->readers = 0;						\
} while (/*CONSTCOND*/0)
#define RURW_INCREAD(rw)						\
do {									\
	pthread_spin_lock(&rw->spin);					\
	assert(rw->readers >= 0);					\
	++(rw)->readers;						\
	pthread_spin_unlock(&rw->spin);					\
} while (/*CONSTCOND*/0)
#define RURW_DECREAD(rw)						\
do {									\
	pthread_spin_lock(&rw->spin);					\
	assert(rw->readers > 0);					\
	--(rw)->readers;						\
	pthread_spin_unlock(&rw->spin);					\
} while (/*CONSTCOND*/0)

struct rumpuser_rw {
	pthread_rwlock_t pthrw;
	pthread_spinlock_t spin;
	int readers;
	pthread_t writer;
};

struct rumpuser_cv {
	pthread_cond_t pthcv;
	int nwaiters;
};

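/*
 * Shared state for the async I/O thread: rumpuser_aios is a ring buffer
 * indexed by rumpuser_aio_head (producer side, filled in by the rump
 * kernel) and rumpuser_aio_tail (consumer side, drained below), and is
 * protected by rumpuser_aio_mtx and signalled through rumpuser_aio_cv.
 * The kernel lock/unlock hooks and the desired number of threads are
 * handed to us in rumpuser_thrinit().
 */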
struct rumpuser_mtx rumpuser_aio_mtx;
struct rumpuser_cv rumpuser_aio_cv;
int rumpuser_aio_head, rumpuser_aio_tail;
struct rumpuser_aio rumpuser_aios[N_AIOS];

kernel_lockfn	rumpuser__klock;
kernel_unlockfn	rumpuser__kunlock;
int		rumpuser__wantthreads;

void
/*ARGSUSED*/
rumpuser_biothread(void *arg)
{
	struct rumpuser_aio *rua;
	rump_biodone_fn biodone = arg;
	ssize_t rv;
	int error, dummy;

	/* unschedule from CPU.  we reschedule before running the interrupt */
	rumpuser__kunlock(0, &dummy, NULL);
	assert(dummy == 0);

	NOFAIL_ERRNO(pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx));
	for (;;) {
		while (rumpuser_aio_head == rumpuser_aio_tail) {
			NOFAIL_ERRNO(pthread_cond_wait(&rumpuser_aio_cv.pthcv,
			    &rumpuser_aio_mtx.pthmtx));
		}

		rua = &rumpuser_aios[rumpuser_aio_tail];
		assert(rua->rua_bp != NULL);
		pthread_mutex_unlock(&rumpuser_aio_mtx.pthmtx);

		if (rua->rua_op & RUA_OP_READ) {
			error = 0;
			rv = pread(rua->rua_fd, rua->rua_data,
			    rua->rua_dlen, rua->rua_off);
			if (rv < 0) {
				rv = 0;
				error = errno;
			}
		} else {
			error = 0;
			rv = pwrite(rua->rua_fd, rua->rua_data,
			    rua->rua_dlen, rua->rua_off);
			if (rv < 0) {
				rv = 0;
				error = errno;
			} else if (rua->rua_op & RUA_OP_SYNC) {
#ifdef __NetBSD__
				fsync_range(rua->rua_fd, FDATASYNC,
				    rua->rua_off, rua->rua_dlen);
#else
				fsync(rua->rua_fd);
#endif
			}
		}
		rumpuser__klock(0, NULL);
		biodone(rua->rua_bp, (size_t)rv, error);
		rumpuser__kunlock(0, &dummy, NULL);

		rua->rua_bp = NULL;

		NOFAIL_ERRNO(pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx));
		rumpuser_aio_tail = (rumpuser_aio_tail+1) % N_AIOS;
		pthread_cond_signal(&rumpuser_aio_cv.pthcv);
	}

	/*NOTREACHED*/
	fprintf(stderr, "error: rumpuser_biothread reached unreachable\n");
	abort();
}
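
#if 0
/*
 * Illustrative sketch only, not compiled: roughly how a producer on the
 * rump kernel side could queue a read request for rumpuser_biothread()
 * to service.  The real enqueue code lives outside this file; the field
 * names are taken from the consumer loop above, everything else
 * (function name, locking details) is an assumption.
 */
static void
example_enqueue_read(int fd, void *data, size_t dlen, off_t off, void *bp)
{
	struct rumpuser_aio *rua;

	pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx);
	/* wait until there is a free slot in the ring */
	while ((rumpuser_aio_head+1) % N_AIOS == rumpuser_aio_tail)
		pthread_cond_wait(&rumpuser_aio_cv.pthcv,
		    &rumpuser_aio_mtx.pthmtx);

	rua = &rumpuser_aios[rumpuser_aio_head];
	rua->rua_fd = fd;
	rua->rua_data = data;
	rua->rua_dlen = dlen;
	rua->rua_off = off;
	rua->rua_op = RUA_OP_READ;
	rua->rua_bp = bp;	/* non-NULL marks the slot as busy */

	rumpuser_aio_head = (rumpuser_aio_head+1) % N_AIOS;
	pthread_cond_signal(&rumpuser_aio_cv.pthcv);
	pthread_mutex_unlock(&rumpuser_aio_mtx.pthmtx);
}
#endif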

void
rumpuser_thrinit(kernel_lockfn lockfn, kernel_unlockfn unlockfn, int threads)
{

	pthread_mutex_init(&rumpuser_aio_mtx.pthmtx, NULL);
	pthread_cond_init(&rumpuser_aio_cv.pthcv, NULL);

	pthread_key_create(&curlwpkey, NULL);

	rumpuser__klock = lockfn;
	rumpuser__kunlock = unlockfn;
	rumpuser__wantthreads = threads;
}

#if 0
void
rumpuser__thrdestroy(void)
{

	pthread_key_delete(curlwpkey);
}
#endif

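/*
 * Create a host thread.  For joinable threads the pthread id is stored
 * in heap memory and handed back via ptcookie, to be passed to
 * rumpuser_thread_join() later; detached threads need no cookie.
 */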
int
rumpuser_thread_create(void *(*f)(void *), void *arg, const char *thrname,
	int joinable, void **ptcookie)
{
	pthread_t ptid;
	pthread_t *ptidp;
	pthread_attr_t pattr;
	int rv;

	if ((rv = pthread_attr_init(&pattr)) != 0)
		return rv;

	if (joinable) {
		NOFAIL(ptidp = malloc(sizeof(*ptidp)));
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
	} else {
		ptidp = &ptid;
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_DETACHED);
	}

	rv = pthread_create(ptidp, &pattr, f, arg);
#ifdef __NetBSD__
	/* use *ptidp: ptid itself is filled in only for the detached case */
	if (rv == 0 && thrname)
		pthread_setname_np(*ptidp, thrname, NULL);
#endif

	if (joinable) {
		assert(ptcookie);
		*ptcookie = ptidp;
	}

	pthread_attr_destroy(&pattr);

	return rv;
}

__dead void
rumpuser_thread_exit(void)
{

	pthread_exit(NULL);
}

int
rumpuser_thread_join(void *ptcookie)
{
	pthread_t *pt = ptcookie;
	int rv;

	KLOCK_WRAP((rv = pthread_join(*pt, NULL)));
	if (rv == 0)
		free(pt);

	return rv;
}
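
#if 0
/*
 * Usage sketch only, not compiled: creating a joinable worker with this
 * interface and reaping it.  example_worker and example_create_and_join
 * are made up for illustration.
 */
static void *
example_worker(void *arg)
{

	return NULL;
}

static int
example_create_and_join(void)
{
	void *cookie;
	int rv;

	rv = rumpuser_thread_create(example_worker, NULL, "example",
	    1 /* joinable */, &cookie);
	if (rv != 0)
		return rv;
	return rumpuser_thread_join(cookie);
}
#endif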

void
rumpuser_mutex_init(struct rumpuser_mtx **mtx)
{
	pthread_mutexattr_t att;

	NOFAIL(*mtx = malloc(sizeof(struct rumpuser_mtx)));

	pthread_mutexattr_init(&att);
	pthread_mutexattr_settype(&att, PTHREAD_MUTEX_ERRORCHECK);
	NOFAIL_ERRNO(pthread_mutex_init(&((*mtx)->pthmtx), &att));
	pthread_mutexattr_destroy(&att);

	(*mtx)->owner = NULL;
	(*mtx)->recursion = 0;
}

void
rumpuser_mutex_recursive_init(struct rumpuser_mtx **mtx)
{
	pthread_mutexattr_t mattr;

	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);

	NOFAIL(*mtx = malloc(sizeof(struct rumpuser_mtx)));
	NOFAIL_ERRNO(pthread_mutex_init(&((*mtx)->pthmtx), &mattr));
	(*mtx)->owner = NULL;
	(*mtx)->recursion = 0;

	pthread_mutexattr_destroy(&mattr);
}

static void
mtxenter(struct rumpuser_mtx *mtx)
{

	if (mtx->recursion++ == 0) {
		assert(mtx->owner == NULL);
		mtx->owner = pthread_self();
	} else {
		assert(pthread_equal(mtx->owner, pthread_self()));
	}
}

static void
mtxexit(struct rumpuser_mtx *mtx)
{

	assert(mtx->owner != NULL);
	if (--mtx->recursion == 0)
		mtx->owner = NULL;
}

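/*
 * Acquire a mutex.  Try an uncontended trylock first; only if that
 * fails do we give up the rump kernel CPU (KLOCK_WRAP) around the
 * blocking pthread_mutex_lock(), so the fast path stays cheap.
 */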
void
rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
{

	if (pthread_mutex_trylock(&mtx->pthmtx) != 0)
		KLOCK_WRAP(NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx)));
	mtxenter(mtx);
}

void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx));
	mtxenter(mtx);
}

int
rumpuser_mutex_tryenter(struct rumpuser_mtx *mtx)
{
	int rv;

	rv = pthread_mutex_trylock(&mtx->pthmtx);
	if (rv == 0) {
		mtxenter(mtx);
	}

	return rv == 0;
}

void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
}

void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_destroy(&mtx->pthmtx));
	free(mtx);
}

int
rumpuser_mutex_held(struct rumpuser_mtx *mtx)
{

	return mtx->recursion && pthread_equal(mtx->owner, pthread_self());
}
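
#if 0
/*
 * Usage sketch only, not compiled: the mutex interface as seen by a
 * caller.  example_mutex_use is made up for illustration.
 */
static void
example_mutex_use(void)
{
	struct rumpuser_mtx *example_mtx;

	rumpuser_mutex_init(&example_mtx);
	rumpuser_mutex_enter(example_mtx);
	assert(rumpuser_mutex_held(example_mtx));
	rumpuser_mutex_exit(example_mtx);
	rumpuser_mutex_destroy(example_mtx);
}
#endif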

void
rumpuser_rw_init(struct rumpuser_rw **rw)
{

	NOFAIL(*rw = malloc(sizeof(struct rumpuser_rw)));
	NOFAIL_ERRNO(pthread_rwlock_init(&((*rw)->pthrw), NULL));
	NOFAIL_ERRNO(pthread_spin_init(&((*rw)->spin), PTHREAD_PROCESS_SHARED));
	(*rw)->readers = 0;
	(*rw)->writer = NULL;
}

void
rumpuser_rw_enter(struct rumpuser_rw *rw, int iswrite)
{

	if (iswrite) {
		if (pthread_rwlock_trywrlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_wrlock(&rw->pthrw)));
		RURW_SETWRITE(rw);
	} else {
		if (pthread_rwlock_tryrdlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_rdlock(&rw->pthrw)));
		RURW_INCREAD(rw);
	}
}

int
rumpuser_rw_tryenter(struct rumpuser_rw *rw, int iswrite)
{
	int rv;

	if (iswrite) {
		rv = pthread_rwlock_trywrlock(&rw->pthrw);
		if (rv == 0)
			RURW_SETWRITE(rw);
	} else {
		rv = pthread_rwlock_tryrdlock(&rw->pthrw);
		if (rv == 0)
			RURW_INCREAD(rw);
	}

	return rv == 0;
}

void
rumpuser_rw_exit(struct rumpuser_rw *rw)
{

	if (RURW_HASREAD(rw))
		RURW_DECREAD(rw);
	else
		RURW_CLRWRITE(rw);
	NOFAIL_ERRNO(pthread_rwlock_unlock(&rw->pthrw));
}

void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	NOFAIL_ERRNO(pthread_rwlock_destroy(&rw->pthrw));
	NOFAIL_ERRNO(pthread_spin_destroy(&rw->spin));
	free(rw);
}

int
rumpuser_rw_held(struct rumpuser_rw *rw)
{

	return rw->readers != 0;
}

int
rumpuser_rw_rdheld(struct rumpuser_rw *rw)
{

	return RURW_HASREAD(rw);
}

int
rumpuser_rw_wrheld(struct rumpuser_rw *rw)
{

	return RURW_AMWRITER(rw);
}

void
rumpuser_cv_init(struct rumpuser_cv **cv)
{

	NOFAIL(*cv = malloc(sizeof(struct rumpuser_cv)));
	NOFAIL_ERRNO(pthread_cond_init(&((*cv)->pthcv), NULL));
	(*cv)->nwaiters = 0;
}

void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_destroy(&cv->pthcv));
	free(cv);
}

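/*
 * Wait on a condition variable.  The caller holds mtx (recursion count
 * 1); we release the rump kernel CPU and the mutex ownership
 * bookkeeping before blocking in pthread_cond_wait() and reacquire
 * both afterwards.  The _nowrap variant skips the kernel-lock dance.
 */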
void
rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{
	int nlocks;

	cv->nwaiters++;
	rumpuser__kunlock(0, &nlocks, mtx);
	assert(mtx->recursion == 1);
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	rumpuser__klock(nlocks, mtx);
	cv->nwaiters--;
}

void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	assert(mtx->recursion == 1);
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	cv->nwaiters--;
}

int
rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
	int64_t sec, int64_t nsec)
{
	struct timespec ts;
	int rv, nlocks;

	/* LINTED */
	ts.tv_sec = sec; ts.tv_nsec = nsec;

	cv->nwaiters++;
	rumpuser__kunlock(0, &nlocks, mtx);
	mtxexit(mtx);
	rv = pthread_cond_timedwait(&cv->pthcv, &mtx->pthmtx, &ts);
	mtxenter(mtx);
	rumpuser__klock(nlocks, mtx);
	cv->nwaiters--;
	if (rv != 0 && rv != ETIMEDOUT)
		abort();

	return rv == ETIMEDOUT;
}

void
rumpuser_cv_signal(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_signal(&cv->pthcv));
}

void
rumpuser_cv_broadcast(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_broadcast(&cv->pthcv));
}

int
rumpuser_cv_has_waiters(struct rumpuser_cv *cv)
{

	return cv->nwaiters;
}
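
#if 0
/*
 * Usage sketch only, not compiled: the canonical wait loop with this
 * interface.  example_cv, example_mtx and example_ready are made up
 * for illustration; the predicate must be rechecked after each wakeup.
 */
static void
example_wait_for_ready(struct rumpuser_cv *example_cv,
	struct rumpuser_mtx *example_mtx, int *example_ready)
{

	rumpuser_mutex_enter(example_mtx);
	while (!*example_ready)
		rumpuser_cv_wait(example_cv, example_mtx);
	rumpuser_mutex_exit(example_mtx);
}
#endif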

/*
 * curlwp
 */

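/*
 * The current lwp is tracked per host thread in thread-specific data
 * (curlwpkey).  A new lwp may only be set when none is currently bound,
 * or the binding may be cleared by passing l == NULL.
 */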
void
rumpuser_set_curlwp(struct lwp *l)
{

	assert(pthread_getspecific(curlwpkey) == NULL || l == NULL);
	pthread_setspecific(curlwpkey, l);
}

struct lwp *
rumpuser_get_curlwp(void)
{

	return pthread_getspecific(curlwpkey);
}