/*	$NetBSD: rumpuser_pth.c,v 1.45 2015/09/18 10:56:25 pooka Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "rumpuser_port.h"

#if !defined(lint)
__RCSID("$NetBSD: rumpuser_pth.c,v 1.45 2015/09/18 10:56:25 pooka Exp $");
#endif /* !lint */

#include <sys/queue.h>

#if defined(HAVE_SYS_ATOMIC_H)
#include <sys/atomic.h>
#endif

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

#include <rump/rumpuser.h>

#include "rumpuser_int.h"

int
rumpuser_thread_create(void *(*f)(void *), void *arg, const char *thrname,
	int joinable, int priority, int cpuidx, void **ptcookie)
{
	pthread_t ptid;
	pthread_t *ptidp;
	pthread_attr_t pattr;
	int rv, i;

	if ((rv = pthread_attr_init(&pattr)) != 0)
		return rv;

	if (joinable) {
		NOFAIL(ptidp = malloc(sizeof(*ptidp)));
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
	} else {
		ptidp = &ptid;
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_DETACHED);
	}

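	/*
	 * pthread_create() can fail transiently with EAGAIN; retry a
	 * bounded number of times with a short sleep before reporting
	 * the failure to the caller.
	 */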
	for (i = 0; i < 10; i++) {
		const struct timespec ts = {0, 10*1000*1000};

		rv = pthread_create(ptidp, &pattr, f, arg);
		if (rv != EAGAIN)
			break;
		nanosleep(&ts, NULL);
	}

#if defined(HAVE_PTHREAD_SETNAME3)
	if (rv == 0 && thrname) {
		pthread_setname_np(*ptidp, thrname, NULL);
	}
#elif defined(HAVE_PTHREAD_SETNAME2)
	if (rv == 0 && thrname) {
		pthread_setname_np(*ptidp, thrname);
	}
#endif

	if (joinable) {
		if (rv == 0) {
			assert(ptcookie);
			*ptcookie = ptidp;
		} else {
			/* don't leak the cookie if create failed */
			free(ptidp);
		}
	}

	pthread_attr_destroy(&pattr);

	ET(rv);
}

__dead void
rumpuser_thread_exit(void)
{

	/*
	 * FIXXXME: with glibc on ARM pthread_exit() aborts because
	 * it fails to unwind the stack.  In the typical case, only
	 * the mountroothook thread will exit, and even that is
	 * conditional on vfs being present.
	 */
#if (defined(__ARMEL__) || defined(__ARMEB__)) && defined(__GLIBC__)
	for (;;)
		pause();
#endif

	pthread_exit(NULL);
}

int
rumpuser_thread_join(void *ptcookie)
{
	pthread_t *pt = ptcookie;
	int rv;

	KLOCK_WRAP((rv = pthread_join(*pt, NULL)));
	if (rv == 0)
		free(pt);

	ET(rv);
}
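
/*
 * Illustrative only (kept under #if 0, like the curlwp test code
 * below): a joinable thread hands its pthread_t back via ptcookie,
 * which rumpuser_thread_join() later consumes and frees.  Note that
 * this backend ignores the priority and cpuidx arguments.
 */
#if 0
static void *
example_thread(void *arg)
{

	return NULL;
}

static void
example_create_join(void)
{
	void *cookie;

	if (rumpuser_thread_create(example_thread, NULL, "example",
	    1 /* joinable */, -1, -1, &cookie) == 0)
		rumpuser_thread_join(cookie);
}
#endif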

struct rumpuser_mtx {
	pthread_mutex_t pthmtx;
	struct lwp *owner;
	int flags;
};
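
/*
 * The owner field is tracked only for RUMPUSER_MTX_KMUTEX mutexes;
 * rumpuser_mutex_owner() may be used only on those.
 */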

void
rumpuser_mutex_init(struct rumpuser_mtx **mtxp, int flags)
{
	struct rumpuser_mtx *mtx;
	pthread_mutexattr_t att;
	size_t allocsz;

	allocsz = (sizeof(*mtx)+RUMPUSER_LOCKALIGN) & ~(RUMPUSER_LOCKALIGN-1);
	NOFAIL(mtx = aligned_alloc(RUMPUSER_LOCKALIGN, allocsz));

	pthread_mutexattr_init(&att);
	pthread_mutexattr_settype(&att, PTHREAD_MUTEX_ERRORCHECK);
	NOFAIL_ERRNO(pthread_mutex_init(&mtx->pthmtx, &att));
	pthread_mutexattr_destroy(&att);

	mtx->owner = NULL;
	assert(flags != 0);
	mtx->flags = flags;

	*mtxp = mtx;
}

static void
mtxenter(struct rumpuser_mtx *mtx)
{

	if (!(mtx->flags & RUMPUSER_MTX_KMUTEX))
		return;

	assert(mtx->owner == NULL);
	mtx->owner = rumpuser_curlwp();
}

static void
mtxexit(struct rumpuser_mtx *mtx)
{

	if (!(mtx->flags & RUMPUSER_MTX_KMUTEX))
		return;

	assert(mtx->owner != NULL);
	mtx->owner = NULL;
}

void
rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
{

	if (mtx->flags & RUMPUSER_MTX_SPIN) {
		rumpuser_mutex_enter_nowrap(mtx);
		return;
	}

	assert(mtx->flags & RUMPUSER_MTX_KMUTEX);
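	/*
	 * Fast path: a cheap trylock.  Only if the lock is contended
	 * do we pay for unscheduling the rump kernel CPU (KLOCK_WRAP)
	 * around the blocking acquisition.
	 */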
	if (pthread_mutex_trylock(&mtx->pthmtx) != 0)
		KLOCK_WRAP(NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx)));
	mtxenter(mtx);
}

void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{

	assert(mtx->flags & RUMPUSER_MTX_SPIN);
	NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx));
	mtxenter(mtx);
}

int
rumpuser_mutex_tryenter(struct rumpuser_mtx *mtx)
{
	int rv;

	rv = pthread_mutex_trylock(&mtx->pthmtx);
	if (rv == 0) {
		mtxenter(mtx);
	}

	ET(rv);
}

void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
}

void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_destroy(&mtx->pthmtx));
	free(mtx);
}

void
rumpuser_mutex_owner(struct rumpuser_mtx *mtx, struct lwp **lp)
{

	if (__predict_false(!(mtx->flags & RUMPUSER_MTX_KMUTEX))) {
		fprintf(stderr,
		    "panic: rumpuser_mutex_owner unsupported on non-kmtx\n");
		abort();
	}

	*lp = mtx->owner;
}

/*
 * rwlocks.  these are mostly simple, except that NetBSD wants to
 * support something called downgrade, which means we need to swap
 * our exclusive lock for a shared lock.  to accommodate this,
 * we need to check *after* acquiring a lock in case someone was
 * downgrading it.  if so, we didn't actually get the lock and may
 * need to retry later.
 */

struct rumpuser_rw {
	pthread_rwlock_t pthrw;
#if !defined(__APPLE__) && !defined(__ANDROID__)
	char pad[64 - sizeof(pthread_rwlock_t)];
	pthread_spinlock_t spin;
#endif
	unsigned int readers;
	struct lwp *writer;
	int downgrade; /* someone is downgrading (hopefully lock holder ;) */
};
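
/*
 * The readers field doubles as the lock state: n > 0 means n active
 * readers, (unsigned)-1 means writer-held, 0 means unowned.
 */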

static int
rw_amwriter(struct rumpuser_rw *rw)
{

	return rw->writer == rumpuser_curlwp() && rw->readers == (unsigned)-1;
}

static int
rw_nreaders(struct rumpuser_rw *rw)
{
	unsigned nreaders = rw->readers;

	return nreaders != (unsigned)-1 ? nreaders : 0;
}

static int
rw_setwriter(struct rumpuser_rw *rw, int retry)
{

	/*
	 * Don't need the spinlock here, we already have an
	 * exclusive lock and "downgrade" is stable until complete.
	 */
	if (rw->downgrade) {
		pthread_rwlock_unlock(&rw->pthrw);
		if (retry) {
			struct timespec ts;

			/* portable yield, essentially */
			ts.tv_sec = 0;
			ts.tv_nsec = 1;
			KLOCK_WRAP(nanosleep(&ts, NULL));
		}
		return EBUSY;
	}
	assert(rw->readers == 0);
	rw->writer = rumpuser_curlwp();
	rw->readers = (unsigned)-1;
	return 0;
}

static void
rw_clearwriter(struct rumpuser_rw *rw)
{

	assert(rw_amwriter(rw));
	rw->readers = 0;
	rw->writer = NULL;
}

static inline void
rw_readup(struct rumpuser_rw *rw)
{

#if defined(__NetBSD__) || defined(__APPLE__) || defined(__ANDROID__)
	atomic_inc_uint(&rw->readers);
#else
	pthread_spin_lock(&rw->spin);
	++rw->readers;
	pthread_spin_unlock(&rw->spin);
#endif
}

static inline void
rw_readdown(struct rumpuser_rw *rw)
{

#if defined(__NetBSD__) || defined(__APPLE__) || defined(__ANDROID__)
	atomic_dec_uint(&rw->readers);
#else
	pthread_spin_lock(&rw->spin);
	assert(rw->readers > 0);
	--rw->readers;
	pthread_spin_unlock(&rw->spin);
#endif
}

void
rumpuser_rw_init(struct rumpuser_rw **rwp)
{
	struct rumpuser_rw *rw;
	size_t allocsz;

	allocsz = (sizeof(*rw)+RUMPUSER_LOCKALIGN) & ~(RUMPUSER_LOCKALIGN-1);

	NOFAIL(rw = aligned_alloc(RUMPUSER_LOCKALIGN, allocsz));
	NOFAIL_ERRNO(pthread_rwlock_init(&rw->pthrw, NULL));
#if !defined(__APPLE__) && !defined(__ANDROID__)
	NOFAIL_ERRNO(pthread_spin_init(&rw->spin, PTHREAD_PROCESS_PRIVATE));
#endif
	rw->readers = 0;
	rw->writer = NULL;
	rw->downgrade = 0;

	*rwp = rw;
}

void
rumpuser_rw_enter(int enum_rumprwlock, struct rumpuser_rw *rw)
{
	enum rumprwlock lk = enum_rumprwlock;

	switch (lk) {
	case RUMPUSER_RW_WRITER:
		do {
			if (pthread_rwlock_trywrlock(&rw->pthrw) != 0)
				KLOCK_WRAP(NOFAIL_ERRNO(
				    pthread_rwlock_wrlock(&rw->pthrw)));
		} while (rw_setwriter(rw, 1) != 0);
		break;
	case RUMPUSER_RW_READER:
		if (pthread_rwlock_tryrdlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_rdlock(&rw->pthrw)));
		rw_readup(rw);
		break;
	}
}

int
rumpuser_rw_tryenter(int enum_rumprwlock, struct rumpuser_rw *rw)
{
	enum rumprwlock lk = enum_rumprwlock;
	int rv;

	switch (lk) {
	case RUMPUSER_RW_WRITER:
		rv = pthread_rwlock_trywrlock(&rw->pthrw);
		if (rv == 0)
			rv = rw_setwriter(rw, 0);
		break;
	case RUMPUSER_RW_READER:
		rv = pthread_rwlock_tryrdlock(&rw->pthrw);
		if (rv == 0)
			rw_readup(rw);
		break;
	default:
		rv = EINVAL;
		break;
	}

	ET(rv);
}

int
rumpuser_rw_tryupgrade(struct rumpuser_rw *rw)
{

	/*
	 * Not supported by pthreads.  Since the caller needs to
	 * back off anyway to avoid deadlock, always failing
	 * is correct.
	 */
	ET(EBUSY);
}

/*
 * convert from exclusive to shared lock without allowing anyone to
 * obtain an exclusive lock in between.  actually, we might allow
 * someone to obtain the lock, we just don't allow that thread to
 * return from the hypercall with it.
 */
void
rumpuser_rw_downgrade(struct rumpuser_rw *rw)
{

	assert(rw->downgrade == 0);
	rw->downgrade = 1;
	rumpuser_rw_exit(rw);
	/*
	 * though the competition can't get out of the hypervisor, it
	 * might have rescheduled itself after we released the lock.
	 * so we need a wrap here.
	 */
	KLOCK_WRAP(NOFAIL_ERRNO(pthread_rwlock_rdlock(&rw->pthrw)));
	rw->downgrade = 0;
	rw_readup(rw);
}
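
/*
 * the downgrade protocol in a nutshell (T1 and T2 are hypothetical
 * threads):
 *
 *	T1: holds the write lock, sets rw->downgrade, unlocks
 *	T2: acquires the write lock, but rw_setwriter() sees
 *	    rw->downgrade, drops the lock again and retries
 *	T1: reacquires the lock as a reader, clears rw->downgrade
 */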

void
rumpuser_rw_exit(struct rumpuser_rw *rw)
{

	if (rw_nreaders(rw))
		rw_readdown(rw);
	else
		rw_clearwriter(rw);
	NOFAIL_ERRNO(pthread_rwlock_unlock(&rw->pthrw));
}

void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	NOFAIL_ERRNO(pthread_rwlock_destroy(&rw->pthrw));
#if !defined(__APPLE__) && !defined(__ANDROID__)
	NOFAIL_ERRNO(pthread_spin_destroy(&rw->spin));
#endif
	free(rw);
}

void
rumpuser_rw_held(int enum_rumprwlock, struct rumpuser_rw *rw, int *rv)
{
	enum rumprwlock lk = enum_rumprwlock;

	switch (lk) {
	case RUMPUSER_RW_WRITER:
		*rv = rw_amwriter(rw);
		break;
	case RUMPUSER_RW_READER:
		*rv = rw_nreaders(rw);
		break;
	}
}

/*
 * condvar
 */

struct rumpuser_cv {
	pthread_cond_t pthcv;
	int nwaiters;
};
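
/*
 * nwaiters is updated with the interlock held; rumpuser_cv_has_waiters()
 * is presumably called under the same interlock, since there is no
 * internal synchronization for it here.
 */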

void
rumpuser_cv_init(struct rumpuser_cv **cv)
{

	NOFAIL(*cv = malloc(sizeof(struct rumpuser_cv)));
	NOFAIL_ERRNO(pthread_cond_init(&((*cv)->pthcv), NULL));
	(*cv)->nwaiters = 0;
}

void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_destroy(&cv->pthcv));
	free(cv);
}

static void
cv_unschedule(struct rumpuser_mtx *mtx, int *nlocks)
{

	rumpkern_unsched(nlocks, mtx);
	mtxexit(mtx);
}

static void
cv_reschedule(struct rumpuser_mtx *mtx, int nlocks)
{

	/*
	 * If the cv interlock is a spin mutex, we must first release
	 * the mutex that was reacquired by pthread_cond_wait(),
	 * acquire the CPU context and only then relock the mutex.
	 * This is to preserve resource allocation order so that
	 * we don't deadlock.  Non-spinning mutexes don't have this
	 * problem since they don't use a hold-and-wait approach
	 * to acquiring the mutex wrt the rump kernel CPU context.
	 *
	 * A better solution would be to rework rumpkern_sched()
	 * so that it's possible to tell the scheduler
	 * "if you need to block, drop this lock first", but I'm not
	 * going poking there without some numbers on how often this
	 * path is taken for spin mutexes.
	 */
	if ((mtx->flags & (RUMPUSER_MTX_SPIN | RUMPUSER_MTX_KMUTEX)) ==
	    (RUMPUSER_MTX_SPIN | RUMPUSER_MTX_KMUTEX)) {
		NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
		rumpkern_sched(nlocks, mtx);
		rumpuser_mutex_enter_nowrap(mtx);
	} else {
		mtxenter(mtx);
		rumpkern_sched(nlocks, mtx);
	}
}

void
rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{
	int nlocks;

	cv->nwaiters++;
	cv_unschedule(mtx, &nlocks);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	cv_reschedule(mtx, nlocks);
	cv->nwaiters--;
}
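
/*
 * canonical caller-side usage, for reference (illustrative only;
 * "condition" stands for whatever predicate is being waited on):
 *
 *	rumpuser_mutex_enter(mtx);
 *	while (!condition)
 *		rumpuser_cv_wait(cv, mtx);
 *	rumpuser_mutex_exit(mtx);
 */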

void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	cv->nwaiters--;
}

int
rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
	int64_t sec, int64_t nsec)
{
	struct timespec ts;
	int rv, nlocks;

	/*
	 * Get the clock already here, just in case we are put to sleep
	 * after releasing the kernel context.
	 *
	 * The condition variables should use CLOCK_MONOTONIC, but since
	 * that's not available everywhere, leave it for another day.
	 */
	clock_gettime(CLOCK_REALTIME, &ts);

	cv->nwaiters++;
	cv_unschedule(mtx, &nlocks);

	ts.tv_sec += sec;
	ts.tv_nsec += nsec;
	if (ts.tv_nsec >= 1000*1000*1000) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000*1000*1000;
	}
	rv = pthread_cond_timedwait(&cv->pthcv, &mtx->pthmtx, &ts);

	cv_reschedule(mtx, nlocks);
	cv->nwaiters--;

	ET(rv);
}
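
/*
 * A sketch of the CLOCK_MONOTONIC variant alluded to above, assuming
 * pthread_condattr_setclock() is available (POSIX makes it optional):
 */
#if 0
static void
cv_init_monotonic(pthread_cond_t *pthcv)
{
	pthread_condattr_t att;

	pthread_condattr_init(&att);
	pthread_condattr_setclock(&att, CLOCK_MONOTONIC);
	pthread_cond_init(pthcv, &att);
	pthread_condattr_destroy(&att);
	/* timedwait deadlines would then be based on CLOCK_MONOTONIC */
}
#endif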

void
rumpuser_cv_signal(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_signal(&cv->pthcv));
}

void
rumpuser_cv_broadcast(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_broadcast(&cv->pthcv));
}

void
rumpuser_cv_has_waiters(struct rumpuser_cv *cv, int *nwaiters)
{

	*nwaiters = cv->nwaiters;
}

/*
 * curlwp
 */

static pthread_key_t curlwpkey;

/*
 * the if0'd curlwp implementation is not used by this hypervisor,
 * but serves as test code to check that the intended usage works.
 */
#if 0
struct rumpuser_lwp {
	struct lwp *l;
	LIST_ENTRY(rumpuser_lwp) l_entries;
};
static LIST_HEAD(, rumpuser_lwp) lwps = LIST_HEAD_INITIALIZER(lwps);
static pthread_mutex_t lwplock = PTHREAD_MUTEX_INITIALIZER;

void
rumpuser_curlwpop(enum rumplwpop op, struct lwp *l)
{
	struct rumpuser_lwp *rl, *rliter;

	switch (op) {
	case RUMPUSER_LWP_CREATE:
		rl = malloc(sizeof(*rl));
		rl->l = l;
		pthread_mutex_lock(&lwplock);
		LIST_FOREACH(rliter, &lwps, l_entries) {
			if (rliter->l == l) {
				fprintf(stderr, "LWP_CREATE: %p exists\n", l);
				abort();
			}
		}
		LIST_INSERT_HEAD(&lwps, rl, l_entries);
		pthread_mutex_unlock(&lwplock);
		break;
	case RUMPUSER_LWP_DESTROY:
		pthread_mutex_lock(&lwplock);
		LIST_FOREACH(rl, &lwps, l_entries) {
			if (rl->l == l)
				break;
		}
		if (!rl) {
			fprintf(stderr, "LWP_DESTROY: %p does not exist\n", l);
			abort();
		}
		LIST_REMOVE(rl, l_entries);
		pthread_mutex_unlock(&lwplock);
		free(rl);
		break;
	case RUMPUSER_LWP_SET:
		assert(pthread_getspecific(curlwpkey) == NULL && l != NULL);

		pthread_mutex_lock(&lwplock);
		LIST_FOREACH(rl, &lwps, l_entries) {
			if (rl->l == l)
				break;
		}
		if (!rl) {
			fprintf(stderr,
			    "LWP_SET: %p does not exist\n", l);
			abort();
		}
		pthread_mutex_unlock(&lwplock);

		pthread_setspecific(curlwpkey, rl);
		break;
	case RUMPUSER_LWP_CLEAR:
		assert(((struct rumpuser_lwp *)
		    pthread_getspecific(curlwpkey))->l == l);
		pthread_setspecific(curlwpkey, NULL);
		break;
	}
}

struct lwp *
rumpuser_curlwp(void)
{
	struct rumpuser_lwp *rl;

	rl = pthread_getspecific(curlwpkey);
	return rl ? rl->l : NULL;
}

#else

void
rumpuser_curlwpop(int enum_rumplwpop, struct lwp *l)
{
	enum rumplwpop op = enum_rumplwpop;

	switch (op) {
	case RUMPUSER_LWP_CREATE:
		break;
	case RUMPUSER_LWP_DESTROY:
		break;
	case RUMPUSER_LWP_SET:
		assert(pthread_getspecific(curlwpkey) == NULL);
		pthread_setspecific(curlwpkey, l);
		break;
	case RUMPUSER_LWP_CLEAR:
		assert(pthread_getspecific(curlwpkey) == l);
		pthread_setspecific(curlwpkey, NULL);
		break;
	}
}

struct lwp *
rumpuser_curlwp(void)
{

	return pthread_getspecific(curlwpkey);
}
#endif


void
rumpuser__thrinit(void)
{
	pthread_key_create(&curlwpkey, NULL);
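	/* NULL destructor: curlwp is managed explicitly via LWP_SET/CLEAR */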
}