/*	$NetBSD: threads.c,v 1.21 2013/05/02 19:15:01 pooka Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.21 2013/05/02 19:15:01 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

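/*
 * Descriptor handed from kthread_create() to threadbouncer(): the
 * thread's entry point, its argument, and the kernel lwp which will
 * represent the new host thread.
 */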
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

static bool threads_are_go;
static struct rumpuser_mtx *thrmtx;
static struct rumpuser_cv *thrcv;

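/*
 * Trampoline which runs first on every newly created host thread.
 * It waits until rump_thread_allow() gives the green light, attaches
 * the thread to its lwp, acquires a rump CPU and, unless the thread
 * is MPSAFE, the big kernel lock, and then calls the real entry
 * point.  The entry point must terminate via kthread_exit(), so
 * returning here is a bug.
 */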
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	struct lwp *l = k->mylwp;
	void (*f)(void *);
	void *thrarg;

	f = k->f;
	thrarg = k->arg;

	/* don't allow threads to run before all CPUs have fully attached */
	if (!threads_are_go) {
		rumpuser_mutex_enter_nowrap(thrmtx);
		while (!threads_are_go) {
			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
		}
		rumpuser_mutex_exit(thrmtx);
	}

	/* schedule ourselves */
	rumpuser_curlwpop(RUMPUSER_LWP_SET, l);
	rump_schedule();

	/* free the bounce descriptor; we have copied out all we need */
	free(k, M_TEMP);

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

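/*
 * Initialize the mutex and condition variable which gate thread
 * startup during bootstrap.
 */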
void
rump_thread_init(void)
{

	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&thrcv);
}

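/*
 * Open the gate: release every thread parked in threadbouncer().
 * Called once all CPUs have fully attached.
 */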
void
rump_thread_allow(void)
{

	rumpuser_mutex_enter(thrmtx);
	threads_are_go = true;
	rumpuser_cv_broadcast(thrcv);
	rumpuser_mutex_exit(thrmtx);
}

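/*
 * Threads which are not needed for basic operation.  If the rump
 * kernel is configured to run without threads (rump_threads == 0),
 * creation of these is faked: a warning is printed and
 * kthread_create() returns success.  t_ncmp selects prefix matching
 * instead of an exact name comparison.
 */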
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};

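/*
 * Rump kernel implementation of kthread_create(9): allocate an lwp
 * from proc0, apply the requested flags, name and CPU binding, and
 * start a host thread which enters the kernel via threadbouncer().
 *
 * A minimal usage sketch (hypothetical caller; "examplethread" and
 * "examplearg" are illustrative names only):
 *
 *	int error;
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *	    examplethread, examplearg, NULL, "example");
 *	if (error)
 *		printf("example thread creation failed: %d\n", error);
 */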
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore, nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not enabled, "
				    "%s not functional\n", nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	k = malloc(sizeof(*k), M_TEMP, M_WAITOK);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, k, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
	if (rv)
		return rv;

	if (newlp) {
		*newlp = l;
	} else {
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}

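/*
 * Terminate the calling kernel thread: drop the big lock if it was
 * held, release the lwp, give up the rump CPU and exit the host
 * thread.  The exit code is not used by this implementation.
 */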
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

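/*
 * Wait for a thread created with KTHREAD_MUSTJOIN to exit.  The host
 * join provides the synchronization; the consumer barrier pairs with
 * the one implied by rump_unschedule() on the exiting side.
 *
 * A sketch of the intended pairing (hypothetical worker, same
 * illustrative names as above):
 *
 *	struct lwp *l;
 *
 *	if (kthread_create(PRI_NONE, KTHREAD_MUSTJOIN, NULL,
 *	    examplethread, examplearg, &l, "example") == 0)
 *		(void)kthread_join(l);
 */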
int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	membar_consumer();

	return rv;
}