/*	$NetBSD: kern_kthread.c,v 1.47 2022/09/13 09:37:49 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.47 2022/09/13 09:37:49 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm_extern.h>

static lwp_t *		kthread_jtarget;
static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;

void
kthread_sysinit(void)
{

	mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&kthread_cv, "kthrwait");
	kthread_jtarget = NULL;
}

/*
 * kthread_create: create a kernel thread, that is, a system-only LWP.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	uaddr = uvm_uarea_system_alloc(
	   (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return ENOMEM;
	}
	kmsan_orig((void *)uaddr, USPACE, KMSAN_TYPE_POOL, __RET_ADDR);
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}

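/*
 * Example (editorial sketch, not part of the original source): a
 * minimal pattern for starting a detached, MP-safe worker thread.
 * The names example_worker and example_start are hypothetical.
 */
#if 0
static void
example_worker(void *arg)
{

	/* ... do the work here ... */
	kthread_exit(0);
}

static int
example_start(void)
{

	/*
	 * PRI_NONE selects the default priority; lp may be NULL
	 * because KTHREAD_MUSTJOIN is not set.
	 */
	return kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    example_worker, NULL, NULL, "exworker");
}
#endif
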
/*
 * Cause a kernel thread to exit.  Must be called from the context of
 * the exiting thread itself.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* Barrier for joining. */
	if (l->l_pflag & LP_MUSTJOIN) {
		mutex_enter(&kthread_lock);
		while (kthread_jtarget != l) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		kthread_jtarget = NULL;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* And exit.  lwp_exit() does not return. */
	lwp_exit(l);
	panic("kthread_exit");
}

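/*
 * Example (editorial sketch): the exiting side of the join barrier
 * above.  A thread created with KTHREAD_MUSTJOIN simply calls
 * kthread_exit(); the call blocks until a joiner shows up.  The name
 * example_joinable_worker is hypothetical.
 */
#if 0
static void
example_joinable_worker(void *arg)
{

	/* ... run until asked to stop ... */
	kthread_exit(0);	/* waits here for kthread_join() */
}
#endif
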
/*
 * Wait for a kthread to exit, in the manner of pthread_join().
 */
int
kthread_join(lwp_t *l)
{

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Wait if some other thread has occupied the target.
	 * - Specify our kthread as a target and notify it.
	 * - Wait for the target kthread to notify us.
	 */
	mutex_enter(&kthread_lock);
	while (kthread_jtarget) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	kthread_jtarget = l;
	cv_broadcast(&kthread_cv);
	while (kthread_jtarget == l) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}

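/*
 * Example (editorial sketch): creating a joinable kthread and reaping
 * it.  Real code would first ask the worker to exit, e.g. via a flag
 * it checks in its loop; that signalling is elided here.
 */
#if 0
static int
example_start_and_join(void)
{
	lwp_t *l;
	int error;

	/* KTHREAD_MUSTJOIN requires a non-NULL lp argument. */
	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
	    example_joinable_worker, NULL, &l, "exjoin");
	if (error)
		return error;

	/* ... later, after asking the worker to exit ... */
	return kthread_join(l);
}
#endif
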
/*
 * kthread_fpu_enter()
 *
 *	Allow the current lwp, which must be a kthread, to use the FPU.
 *	Return a cookie that must be passed to kthread_fpu_exit when
 *	done.  Must be used only in thread context.  Recursive -- you
 *	can call kthread_fpu_enter several times in a row as long as
 *	you pass the cookies in reverse order to kthread_fpu_exit.
 */
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	/*
	 * Remember whether this thread already had FPU access, and
	 * mark this thread as having FPU access.
	 */
	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to enable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_enter_md();

	return s;
}

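/*
 * Example (editorial sketch): bracketing FPU use in a kthread with
 * the cookie returned by kthread_fpu_enter().
 */
#if 0
static void
example_fpu_section(void)
{
	int s;

	s = kthread_fpu_enter();
	/* ... floating-point/SIMD work is safe here ... */
	kthread_fpu_exit(s);
}
#endif
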
/*
 * kthread_fpu_exit(s)
 *
 *	Restore the current lwp's FPU access to what it was before the
 *	matching call to kthread_fpu_enter() that returned s.  Must be
 *	used only in thread context.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	/*
	 * Restore LW_SYSTEM_FPU to its value on entry: s is either 0
	 * or LW_SYSTEM_FPU, so the XOR clears the flag iff s == 0.
	 */
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to zero and disable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_exit_md();
}
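
/*
 * Example (editorial sketch): nested FPU sections.  The cookies must
 * be handed back to kthread_fpu_exit() in reverse order, so the FPU
 * is disabled only when the outermost section ends.
 */
#if 0
static void
example_fpu_nested(void)
{
	int s0, s1;

	s0 = kthread_fpu_enter();	/* enables the FPU */
	s1 = kthread_fpu_enter();	/* nested; already enabled */
	/* ... FPU work ... */
	kthread_fpu_exit(s1);		/* FPU stays enabled */
	kthread_fpu_exit(s0);		/* FPU disabled again */
}
#endif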