/*	$NetBSD: kern_kthread.c,v 1.49 2023/09/23 14:40:42 ad Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007, 2009, 2019, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.49 2023/09/23 14:40:42 ad Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm_extern.h>

static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;

void
kthread_sysinit(void)
{

	mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&kthread_cv, "kthrwait");
}

/*
 * kthread_create: create a kernel thread, that is, a system-only LWP.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	uaddr = uvm_uarea_system_alloc(
	   (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return ENOMEM;
	}
	kmsan_orig((void *)uaddr, USPACE, KMSAN_TYPE_POOL, __RET_ADDR);
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
			l->l_cpu = ci;
		}
		l->l_pflag |= LP_BOUND;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}

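/*
 * Example usage (an illustrative sketch only -- not part of this file
 * and not compiled): starting a detached, MP-safe kernel thread at the
 * default kernel priority.  The names example_worker and example_start
 * are hypothetical.
 */
#if 0
static void
example_worker(void *arg)
{

	/* ... do work ... */
	kthread_exit(0);
}

static int
example_start(void)
{
	lwp_t *l;

	/* PRI_NONE selects PRI_KTHREAD here, since KTHREAD_TS is not set. */
	return kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    example_worker, NULL, &l, "example");
}
#endif
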
/*
 * Cause a kernel thread to exit.  Must be called from the context of
 * the exiting thread itself.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* Barrier for joining. */
	if (l->l_pflag & LP_MUSTJOIN) {
		bool *exitedp;

		mutex_enter(&kthread_lock);
		while ((exitedp = l->l_private) == NULL) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		KASSERT(!*exitedp);
		*exitedp = true;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* And exit. */
	lwp_exit(l);
	panic("kthread_exit");
}

/*
 * Wait for a kthread to exit, like pthread_join().
 */
int
kthread_join(lwp_t *l)
{
	bool exited = false;

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Ask the kthread to write to `exited'.
	 * - After this, touching l is forbidden -- it may be freed.
	 * - Wait until the kthread has written to `exited'.
	 */
	mutex_enter(&kthread_lock);
	KASSERT(l->l_private == NULL);
	l->l_private = &exited;
	cv_broadcast(&kthread_cv);
	while (!exited) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}

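/*
 * Example usage (an illustrative sketch only -- not part of this file
 * and not compiled): a joinable thread created with KTHREAD_MUSTJOIN.
 * kthread_exit() in the worker blocks at the join barrier until
 * kthread_join() has registered; once kthread_join() returns, the LWP
 * may already be freed.  The names joinable_worker and example_join
 * are hypothetical.
 */
#if 0
static void
joinable_worker(void *arg)
{

	/* ... do work ... */
	kthread_exit(0);	/* waits here for kthread_join() */
}

static void
example_join(void)
{
	lwp_t *l;

	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN,
	    NULL, joinable_worker, NULL, &l, "joinable") == 0)
		(void)kthread_join(l);	/* don't touch l after this */
}
#endif
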
/*
 * kthread_fpu_enter()
 *
 *	Allow the current lwp, which must be a kthread, to use the FPU.
 *	Return a cookie that must be passed to kthread_fpu_exit when
 *	done.  Must be used only in thread context.  Recursive -- you
 *	can call kthread_fpu_enter several times in a row as long as
 *	you pass the cookies in reverse order to kthread_fpu_exit.
 */
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	/*
	 * Remember whether this thread already had FPU access, and
	 * mark this thread as having FPU access.
	 */
	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to enable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_enter_md();

	return s;
}

/*
 * kthread_fpu_exit(s)
 *
 *	Restore the current lwp's FPU access to what it was before the
 *	matching call to kthread_fpu_enter() that returned s.  Must be
 *	used only in thread context.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to zero and disable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_exit_md();
}
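
/*
 * Example usage (an illustrative sketch only -- not part of this file
 * and not compiled): nested FPU sections in a kthread, with the
 * cookies passed back to kthread_fpu_exit() in reverse order.  The
 * name fpu_worker is hypothetical.
 */
#if 0
static void
fpu_worker(void *arg)
{
	int s1, s2;

	s1 = kthread_fpu_enter();	/* s1 == 0: FPU was off */
	/* ... FPU-using code ... */
	s2 = kthread_fpu_enter();	/* recursion: s2 != 0 */
	/* ... more FPU-using code ... */
	kthread_fpu_exit(s2);		/* FPU stays enabled */
	kthread_fpu_exit(s1);		/* FPU zeroed and disabled */
	kthread_exit(0);
}
#endif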