1 /*	$NetBSD: kern_lwp.c,v 1.246 2021/12/22 16:57:28 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
5  *     The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Nathan J. Williams, and Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Overview
35  *
36  *	Lightweight processes (LWPs) are the basic unit or thread of
37  *	execution within the kernel.  The core state of an LWP is described
38  *	by "struct lwp", also known as lwp_t.
39  *
40  *	Each LWP is contained within a process (described by "struct proc").
41  *	Every process contains at least one LWP, but may contain more.  The
42  *	process describes attributes shared among all of its LWPs such as a
43  *	private address space, global execution state (stopped, active,
44  *	zombie, ...), signal disposition and so on.  On a multiprocessor
45  *	machine, multiple LWPs may be executing concurrently in the kernel.
46  *
47  * Execution states
48  *
49  *	At any given time, an LWP has overall state that is described by
50  *	lwp::l_stat.  The states are broken into two sets below.  The first
51  *	set is guaranteed to represent the absolute, current state of the
52  *	LWP:
53  *
54  *	LSONPROC
55  *
56  *		On processor: the LWP is executing on a CPU, either in the
57  *		kernel or in user space.
58  *
59  *	LSRUN
60  *
61  *		Runnable: the LWP is parked on a run queue, and may soon be
62  *		chosen to run by an idle processor, or by a processor that
63  *		has been asked to preempt a currently running but lower
64  *		priority LWP.
65  *
66  *	LSIDL
67  *
68  *		Idle: the LWP has been created but has not yet executed, or
69  *		it has ceased executing a unit of work and is waiting to be
70  *		started again.  This state exists so that the LWP can occupy
71  *		a slot in the process & PID table, but without having to
72  *		worry about being touched; lookups of the LWP by ID will
73  *		fail while in this state.  The LWP will become visible for
74  *		lookup once its state transitions further.  Some special
75  *		kernel threads also (ab)use this state to indicate that they
76  *		are idle (soft interrupts and idle LWPs).
77  *
78  *	LSSUSPENDED:
79  *
80  *		Suspended: the LWP has had its execution suspended by
81  *		another LWP in the same process using the _lwp_suspend()
82  *		system call.  User-level LWPs also enter the suspended
83  *		state when the system is shutting down.
84  *
85  *	The second set represents a "statement of intent" on behalf of the
86  *	LWP.  The LWP may in fact be executing on a processor, or may be
87  *	sleeping or idle.  It is expected to take the necessary action to
88  *	stop executing or become "running" again within a short timeframe.
89  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
90  *	Importantly, it indicates that its state is tied to a CPU.
91  *
92  *	LSZOMB:
93  *
94  *		Dead or dying: the LWP has released most of its resources
95  *		and is about to switch away into oblivion, or has already
96  *		switched away.  When it switches away, its few remaining
97  *		resources can be collected.
98  *
99  *	LSSLEEP:
100  *
101  *		Sleeping: the LWP has entered itself onto a sleep queue, and
102  *		has switched away or will switch away shortly to allow other
103  *		LWPs to run on the CPU.
104  *
105  *	LSSTOP:
106  *
107  *		Stopped: the LWP has been stopped as a result of a job
108  *		control signal, or as a result of the ptrace() interface.
109  *
110  *		Stopped LWPs may run briefly within the kernel to handle
111  *		signals that they receive, but will not return to user space
112  *		until their process' state is changed away from stopped.
113  *
114  *		Single LWPs within a process cannot be set stopped
115  *		selectively: all actions that can stop or continue LWPs
116  *		occur at the process level.
117  *
118  * State transitions
119  *
120  *	Note that the LSSTOP state may only be set when returning to
121  *	user space in userret(), or when sleeping interruptibly.  The
122  *	LSSUSPENDED state may only be set in userret().  Before setting
123  *	those states, we try to ensure that the LWPs will release all
124  *	locks that they hold, and at a minimum try to ensure that the
125  *	LWP can be set runnable again by a signal.
126  *
127  *	LWPs may transition states in the following ways:
128  *
129  *	 RUN -------> ONPROC		ONPROC -----> RUN
130  *		    				    > SLEEP
131  *		    				    > STOPPED
132  *						    > SUSPENDED
133  *						    > ZOMB
134  *						    > IDL (special cases)
135  *
136  *	 STOPPED ---> RUN		SUSPENDED --> RUN
137  *	            > SLEEP
138  *
139  *	 SLEEP -----> ONPROC		IDL --------> RUN
140  *		    > RUN			    > SUSPENDED
141  *		    > STOPPED			    > STOPPED
142  *						    > ONPROC (special cases)
143  *
144  *	Some state transitions are only possible with kernel threads (e.g.
145  *	ONPROC -> IDL) and happen under tightly controlled circumstances
146  *	free of unwanted side effects.
147  *
148  * Migration
149  *
150  *	Migration of threads from one CPU to another may be performed
151  *	internally by the scheduler via the sched_takecpu() or
152  *	sched_catchlwp() functions.  The universal lwp_migrate() function
153  *	should be used for any other cases.  Subsystems in the kernel must
154  *	be aware that an LWP's CPU may change while the LWP is not locked.
155  *
156  * Locking
157  *
158  *	The majority of fields in 'struct lwp' are covered by a single,
159  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
160  *	each field are documented in sys/lwp.h.
161  *
162  *	State transitions must be made with the LWP's general lock held,
163  *	and may cause the LWP's lock pointer to change.  Manipulation of
164  *	the general lock is not performed directly, but through calls to
165  *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
166  *	adaptive locks are not allowed to be released while the LWP's lock
167  *	is being held (unlike for other spin-locks).
168  *
169  *	States and their associated locks:
170  *
171  *	LSIDL, LSONPROC, LSZOMB, LSSUSPENDED:
172  *
173  *		Always covered by spc_lwplock, which protects LWPs not
174  *		associated with any other sync object.  This is a per-CPU
175  *		lock and matches lwp::l_cpu.
176  *
177  *	LSRUN:
178  *
179  *		Always covered by spc_mutex, which protects the run queues.
180  *		This is a per-CPU lock and matches lwp::l_cpu.
181  *
182  *	LSSLEEP:
183  *
184  *		Covered by a lock associated with the sleep queue (sometimes
185  *		a turnstile sleep queue) that the LWP resides on.  This can
186  *		be spc_lwplock for SOBJ_SLEEPQ_NULL (an "untracked" sleep).
187  *
188  *	LSSTOP:
189  *
190  *		If the LWP was previously sleeping (l_wchan != NULL), then
191  *		l_mutex references the sleep queue lock.  If the LWP was
192  *		runnable or on the CPU when halted, or has been removed from
193  *		the sleep queue since halted, then the lock is spc_lwplock.
194  *
195  *	The lock order is as follows:
196  *
197  *		sleepq -> turnstile -> spc_lwplock -> spc_mutex
198  *
199  *	Each process has a scheduler state lock (proc::p_lock), and a
200  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
201  *	so on.  When an LWP is to be entered into or removed from one of the
202  *	following states, p_lock must be held and the process wide counters
203  *	adjusted:
204  *
205  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
206  *
207  *	(But not always for kernel threads.  There are some special cases
208  *	as mentioned above: soft interrupts, and the idle loops.)
209  *
210  *	Note that an LWP is considered running or likely to run soon if in
211  *	one of the following states.  This affects the value of p_nrlwps:
212  *
213  *		LSRUN, LSONPROC, LSSLEEP
214  *
215  *	p_lock does not need to be held when transitioning among these
216  *	three states, hence p_lock is rarely taken for state transitions.
217  */
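
/*
 * Editorial usage sketch (an illustration, not part of the original
 * text): the locking rules above suggest the following pattern for
 * safely examining an LWP's state; "l" is an LWP of process "p", both
 * assumed to be valid and stable here:
 *
 *	mutex_enter(p->p_lock);		stabilize the process's LWP list
 *	lwp_lock(l);			acquire whatever l_mutex points at
 *	... examine l->l_stat and other l_mutex-covered fields ...
 *	lwp_unlock(l);			release via the (possibly new) l_mutex
 *	mutex_exit(p->p_lock);
 */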
218 
219 #include <sys/cdefs.h>
220 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.246 2021/12/22 16:57:28 thorpej Exp $");
221 
222 #include "opt_ddb.h"
223 #include "opt_lockdebug.h"
224 #include "opt_dtrace.h"
225 
226 #define _LWP_API_PRIVATE
227 
228 #include <sys/param.h>
229 #include <sys/systm.h>
230 #include <sys/cpu.h>
231 #include <sys/pool.h>
232 #include <sys/proc.h>
233 #include <sys/syscallargs.h>
234 #include <sys/syscall_stats.h>
235 #include <sys/kauth.h>
236 #include <sys/sleepq.h>
237 #include <sys/lockdebug.h>
238 #include <sys/kmem.h>
239 #include <sys/pset.h>
240 #include <sys/intr.h>
241 #include <sys/lwpctl.h>
242 #include <sys/atomic.h>
243 #include <sys/filedesc.h>
244 #include <sys/fstrans.h>
245 #include <sys/dtrace_bsd.h>
246 #include <sys/sdt.h>
247 #include <sys/ptrace.h>
248 #include <sys/xcall.h>
249 #include <sys/uidinfo.h>
250 #include <sys/sysctl.h>
251 #include <sys/psref.h>
252 #include <sys/msan.h>
253 #include <sys/kcov.h>
254 #include <sys/cprng.h>
255 #include <sys/futex.h>
256 
257 #include <uvm/uvm_extern.h>
258 #include <uvm/uvm_object.h>
259 
260 static pool_cache_t	lwp_cache	__read_mostly;
261 struct lwplist		alllwp		__cacheline_aligned;
262 
263 static int		lwp_ctor(void *, void *, int);
264 static void		lwp_dtor(void *, void *);
265 
266 /* DTrace proc provider probes */
267 SDT_PROVIDER_DEFINE(proc);
268 
269 SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
270 SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
271 SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
272 
273 struct turnstile turnstile0 __cacheline_aligned;
274 struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
275 #ifdef LWP0_CPU_INFO
276 	.l_cpu = LWP0_CPU_INFO,
277 #endif
278 #ifdef LWP0_MD_INITIALIZER
279 	.l_md = LWP0_MD_INITIALIZER,
280 #endif
281 	.l_proc = &proc0,
282 	.l_lid = 0,		/* we own proc0's slot in the pid table */
283 	.l_flag = LW_SYSTEM,
284 	.l_stat = LSONPROC,
285 	.l_ts = &turnstile0,
286 	.l_syncobj = &sched_syncobj,
287 	.l_refcnt = 0,
288 	.l_priority = PRI_USER + NPRI_USER - 1,
289 	.l_inheritedprio = -1,
290 	.l_class = SCHED_OTHER,
291 	.l_psid = PS_NONE,
292 	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
293 	.l_name = __UNCONST("swapper"),
294 	.l_fd = &filedesc0,
295 };
296 
297 static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
298 
299 /*
300  * sysctl helper routine for kern.maxlwp. Ensures that the new
301  * values are not too low or too high.
302  */
303 static int
304 sysctl_kern_maxlwp(SYSCTLFN_ARGS)
305 {
306 	int error, nmaxlwp;
307 	struct sysctlnode node;
308 
309 	nmaxlwp = maxlwp;
310 	node = *rnode;
311 	node.sysctl_data = &nmaxlwp;
312 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
313 	if (error || newp == NULL)
314 		return error;
315 
316 	if (nmaxlwp < 0 || nmaxlwp >= 65536)
317 		return EINVAL;
318 	if (nmaxlwp > cpu_maxlwp())
319 		return EINVAL;
320 	maxlwp = nmaxlwp;
321 
322 	return 0;
323 }
324 
325 static void
326 sysctl_kern_lwp_setup(void)
327 {
328 	sysctl_createv(NULL, 0, NULL, NULL,
329 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
330 		       CTLTYPE_INT, "maxlwp",
331 		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
332 		       sysctl_kern_maxlwp, 0, NULL, 0,
333 		       CTL_KERN, CTL_CREATE, CTL_EOL);
334 }
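
/*
 * Illustrative sketch (an addition, assuming standard libc behaviour):
 * the node created above is visible to userland as "kern.maxlwp" and
 * could be read with sysctlbyname(3):
 *
 *	int nmax;
 *	size_t len = sizeof(nmax);
 *
 *	if (sysctlbyname("kern.maxlwp", &nmax, &len, NULL, 0) == 0)
 *		printf("kern.maxlwp = %d\n", nmax);
 */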
335 
336 void
337 lwpinit(void)
338 {
339 
340 	LIST_INIT(&alllwp);
341 	lwpinit_specificdata();
342 	/*
343 	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
344 	 * calls will exit before memory of LWPs is returned to the pool, where
345 	 * KVA of LWP structure might be freed and re-used for other purposes.
346 	 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
347 	 * callers, therefore a regular passive serialization barrier will
348 	 * do the job.
349 	 */
350 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0,
351 	    PR_PSERIALIZE, "lwppl", NULL, IPL_NONE, lwp_ctor, lwp_dtor, NULL);
352 
353 	maxlwp = cpu_maxlwp();
354 	sysctl_kern_lwp_setup();
355 }
356 
357 void
358 lwp0_init(void)
359 {
360 	struct lwp *l = &lwp0;
361 
362 	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
363 
364 	LIST_INSERT_HEAD(&alllwp, l, l_list);
365 
366 	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
367 	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
368 	cv_init(&l->l_sigcv, "sigwait");
369 	cv_init(&l->l_waitcv, "vfork");
370 
371 	kauth_cred_hold(proc0.p_cred);
372 	l->l_cred = proc0.p_cred;
373 
374 	kdtrace_thread_ctor(NULL, l);
375 	lwp_initspecific(l);
376 
377 	SYSCALL_TIME_LWP_INIT(l);
378 }
379 
380 /*
381  * Initialize the non-zeroed portion of an lwp_t.
382  */
383 static int
384 lwp_ctor(void *arg, void *obj, int flags)
385 {
386 	lwp_t *l = obj;
387 
388 	l->l_stat = LSIDL;
389 	l->l_cpu = curcpu();
390 	l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
391 	l->l_ts = pool_get(&turnstile_pool, flags);
392 
393 	if (l->l_ts == NULL) {
394 		return ENOMEM;
395 	} else {
396 		turnstile_ctor(l->l_ts);
397 		return 0;
398 	}
399 }
400 
401 static void
402 lwp_dtor(void *arg, void *obj)
403 {
404 	lwp_t *l = obj;
405 
406 	/*
407 	 * The value of l->l_cpu must still be valid at this point.
408 	 */
409 	KASSERT(l->l_cpu != NULL);
410 
411 	/*
412 	 * We can't return turnstile0 to the pool (it didn't come from it),
413 	 * so if it comes up just drop it quietly and move on.
414 	 */
415 	if (l->l_ts != &turnstile0)
416 		pool_put(&turnstile_pool, l->l_ts);
417 }
418 
419 /*
420  * Set an LWP suspended.
421  *
422  * Must be called with p_lock held, and the LWP locked.  Will unlock the
423  * LWP before return.
424  */
425 int
426 lwp_suspend(struct lwp *curl, struct lwp *t)
427 {
428 	int error;
429 
430 	KASSERT(mutex_owned(t->l_proc->p_lock));
431 	KASSERT(lwp_locked(t, NULL));
432 
433 	KASSERT(curl != t || curl->l_stat == LSONPROC);
434 
435 	/*
436 	 * If the current LWP has been told to exit, we must not suspend anyone
437 	 * else or deadlock could occur.  We won't return to userspace.
438 	 */
439 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
440 		lwp_unlock(t);
441 		return (EDEADLK);
442 	}
443 
444 	if ((t->l_flag & LW_DBGSUSPEND) != 0) {
445 		lwp_unlock(t);
446 		return 0;
447 	}
448 
449 	error = 0;
450 
451 	switch (t->l_stat) {
452 	case LSRUN:
453 	case LSONPROC:
454 		t->l_flag |= LW_WSUSPEND;
455 		lwp_need_userret(t);
456 		lwp_unlock(t);
457 		break;
458 
459 	case LSSLEEP:
460 		t->l_flag |= LW_WSUSPEND;
461 
462 		/*
463 		 * Kick the LWP and try to get it to the kernel boundary
464 		 * so that it will release any locks that it holds.
465 		 * setrunnable() will release the lock.
466 		 */
467 		if ((t->l_flag & LW_SINTR) != 0)
468 			setrunnable(t);
469 		else
470 			lwp_unlock(t);
471 		break;
472 
473 	case LSSUSPENDED:
474 		lwp_unlock(t);
475 		break;
476 
477 	case LSSTOP:
478 		t->l_flag |= LW_WSUSPEND;
479 		setrunnable(t);
480 		break;
481 
482 	case LSIDL:
483 	case LSZOMB:
484 		error = EINTR; /* It's what Solaris does..... */
485 		lwp_unlock(t);
486 		break;
487 	}
488 
489 	return (error);
490 }
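
/*
 * Illustrative sketch (an assumption about the userland path, not taken
 * from this file): lwp_suspend() is normally reached via the
 * _lwp_suspend(2) system call on a target LWP in the same process:
 *
 *	lwpid_t target;			obtained from _lwp_create() etc.
 *
 *	if (_lwp_suspend(target) == -1)
 *		err(EXIT_FAILURE, "_lwp_suspend");
 */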
491 
492 /*
493  * Restart a suspended LWP.
494  *
495  * Must be called with p_lock held, and the LWP locked.  Will unlock the
496  * LWP before return.
497  */
498 void
499 lwp_continue(struct lwp *l)
500 {
501 
502 	KASSERT(mutex_owned(l->l_proc->p_lock));
503 	KASSERT(lwp_locked(l, NULL));
504 
505 	/* If rebooting or not suspended, then just bail out. */
506 	if ((l->l_flag & LW_WREBOOT) != 0) {
507 		lwp_unlock(l);
508 		return;
509 	}
510 
511 	l->l_flag &= ~LW_WSUSPEND;
512 
513 	if (l->l_stat != LSSUSPENDED || (l->l_flag & LW_DBGSUSPEND) != 0) {
514 		lwp_unlock(l);
515 		return;
516 	}
517 
518 	/* setrunnable() will release the lock. */
519 	setrunnable(l);
520 }
521 
522 /*
523  * Restart a stopped LWP.
524  *
525  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
526  * LWP before return.
527  */
528 void
529 lwp_unstop(struct lwp *l)
530 {
531 	struct proc *p = l->l_proc;
532 
533 	KASSERT(mutex_owned(&proc_lock));
534 	KASSERT(mutex_owned(p->p_lock));
535 
536 	lwp_lock(l);
537 
538 	KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
539 
540 	/* If not stopped, then just bail out. */
541 	if (l->l_stat != LSSTOP) {
542 		lwp_unlock(l);
543 		return;
544 	}
545 
546 	p->p_stat = SACTIVE;
547 	p->p_sflag &= ~PS_STOPPING;
548 
549 	if (!p->p_waited)
550 		p->p_pptr->p_nstopchild--;
551 
552 	if (l->l_wchan == NULL) {
553 		/* setrunnable() will release the lock. */
554 		setrunnable(l);
555 	} else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) {
556 		/* setrunnable() so we can receive the signal */
557 		setrunnable(l);
558 	} else {
559 		l->l_stat = LSSLEEP;
560 		p->p_nrlwps++;
561 		lwp_unlock(l);
562 	}
563 }
564 
565 /*
566  * Wait for an LWP within the current process to exit.  If 'lid' is
567  * non-zero, we are waiting for a specific LWP.
568  *
569  * Must be called with p->p_lock held.
570  */
571 int
572 lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
573 {
574 	const lwpid_t curlid = l->l_lid;
575 	proc_t *p = l->l_proc;
576 	lwp_t *l2, *next;
577 	int error;
578 
579 	KASSERT(mutex_owned(p->p_lock));
580 
581 	p->p_nlwpwait++;
582 	l->l_waitingfor = lid;
583 
584 	for (;;) {
585 		int nfound;
586 
587 		/*
588 		 * Avoid a race between exit1() and sigexit(): if the
589 		 * process is dumping core, then we need to bail out: call
590 		 * into lwp_userret() where we will be suspended until the
591 		 * deed is done.
592 		 */
593 		if ((p->p_sflag & PS_WCORE) != 0) {
594 			mutex_exit(p->p_lock);
595 			lwp_userret(l);
596 			KASSERT(false);
597 		}
598 
599 		/*
600 		 * First off, drain any detached LWP that is waiting to be
601 		 * reaped.
602 		 */
603 		while ((l2 = p->p_zomblwp) != NULL) {
604 			p->p_zomblwp = NULL;
605 			lwp_free(l2, false, false);/* releases proc mutex */
606 			mutex_enter(p->p_lock);
607 		}
608 
609 		/*
610 		 * Now look for an LWP to collect.  If the whole process is
611 		 * exiting, count detached LWPs as eligible to be collected,
612 		 * but don't drain them here.
613 		 */
614 		nfound = 0;
615 		error = 0;
616 
617 		/*
618 		 * If given a specific LID, go via pid_table and make sure
619 		 * it's not detached.
620 		 */
621 		if (lid != 0) {
622 			l2 = proc_find_lwp(p, lid);
623 			if (l2 == NULL) {
624 				error = ESRCH;
625 				break;
626 			}
627 			KASSERT(l2->l_lid == lid);
628 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
629 				error = EINVAL;
630 				break;
631 			}
632 		} else {
633 			l2 = LIST_FIRST(&p->p_lwps);
634 		}
635 		for (; l2 != NULL; l2 = next) {
636 			next = (lid != 0 ? NULL : LIST_NEXT(l2, l_sibling));
637 
638 			/*
639 			 * If a specific wait and the target is waiting on
640 			 * us, then avoid deadlock.  This also traps LWPs
641 			 * that try to wait on themselves.
642 			 *
643 			 * Note that this does not handle more complicated
644 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
645 			 * can still be killed so it is not a major problem.
646 			 */
647 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
648 				error = EDEADLK;
649 				break;
650 			}
651 			if (l2 == l)
652 				continue;
653 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
654 				nfound += exiting;
655 				continue;
656 			}
657 			if (lid != 0) {
658 				/*
659 				 * Mark this LWP as the first waiter, if there
660 				 * is no other.
661 				 */
662 				if (l2->l_waiter == 0)
663 					l2->l_waiter = curlid;
664 			} else if (l2->l_waiter != 0) {
665 				/*
666 				 * It already has a waiter - so don't
667 				 * collect it.  If the waiter doesn't
668 				 * grab it we'll get another chance
669 				 * later.
670 				 */
671 				nfound++;
672 				continue;
673 			}
674 			nfound++;
675 
676 			/* No need to lock the LWP in order to see LSZOMB. */
677 			if (l2->l_stat != LSZOMB)
678 				continue;
679 
680 			/*
681 			 * We're no longer waiting.  Reset the "first waiter"
682 			 * pointer on the target, in case it was us.
683 			 */
684 			l->l_waitingfor = 0;
685 			l2->l_waiter = 0;
686 			p->p_nlwpwait--;
687 			if (departed)
688 				*departed = l2->l_lid;
689 			sched_lwp_collect(l2);
690 
691 			/* lwp_free() releases the proc lock. */
692 			lwp_free(l2, false, false);
693 			mutex_enter(p->p_lock);
694 			return 0;
695 		}
696 
697 		if (error != 0)
698 			break;
699 		if (nfound == 0) {
700 			error = ESRCH;
701 			break;
702 		}
703 
704 		/*
705 		 * Note: since the lock will be dropped, need to restart on
706 		 * wakeup to run all LWPs again, e.g. there may be new LWPs.
707 		 */
708 		if (exiting) {
709 			KASSERT(p->p_nlwps > 1);
710 			error = cv_timedwait(&p->p_lwpcv, p->p_lock, 1);
711 			break;
712 		}
713 
714 		/*
715 		 * Break out if all LWPs are in _lwp_wait().  There are
716 		 * other ways to hang the process with _lwp_wait(), but the
717 		 * sleep is interruptible so there's little point checking for them.
718 		 */
719 		if (p->p_nlwpwait == p->p_nlwps) {
720 			error = EDEADLK;
721 			break;
722 		}
723 
724 		/*
725 		 * Sit around and wait for something to happen.  We'll be
726 		 * awoken if any of the conditions examined change: if an
727 		 * LWP exits, is collected, or is detached.
728 		 */
729 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
730 			break;
731 	}
732 
733 	/*
734 	 * We didn't find any LWPs to collect, we may have received a
735 	 * signal, or some other condition has caused us to bail out.
736 	 *
737 	 * If waiting on a specific LWP, clear the waiters marker: some
738 	 * other LWP may want it.  Then, kick all the remaining waiters
739 	 * so that they can re-check for zombies and for deadlock.
740 	 */
741 	if (lid != 0) {
742 		l2 = proc_find_lwp(p, lid);
743 		KASSERT(l2 == NULL || l2->l_lid == lid);
744 
745 		if (l2 != NULL && l2->l_waiter == curlid)
746 			l2->l_waiter = 0;
747 	}
748 	p->p_nlwpwait--;
749 	l->l_waitingfor = 0;
750 	cv_broadcast(&p->p_lwpcv);
751 
752 	return error;
753 }
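
/*
 * Illustrative sketch (an assumption about the userland path, not taken
 * from this file): the userland counterpart of lwp_wait() is
 * _lwp_wait(2); a wait LWP ID of 0 waits for any LWP in the process:
 *
 *	lwpid_t departed;
 *
 *	if (_lwp_wait(0, &departed) == 0)
 *		printf("LWP %d exited\n", departed);
 */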
754 
755 /*
756  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
757  * The new LWP is created in state LSIDL and must be set running,
758  * suspended, or stopped by the caller.
759  */
760 int
761 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
762     void *stack, size_t stacksize, void (*func)(void *), void *arg,
763     lwp_t **rnewlwpp, int sclass, const sigset_t *sigmask,
764     const stack_t *sigstk)
765 {
766 	struct lwp *l2;
767 
768 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
769 
770 	/*
771 	 * Enforce limits, excluding the first lwp and kthreads.  We must
772 	 * use the process credentials here when adjusting the limit, as
773 	 * they are what's tied to the accounting entity.  However for
774 	 * authorizing the action, we'll use the LWP's credentials.
775 	 */
776 	mutex_enter(p2->p_lock);
777 	if (p2->p_nlwps != 0 && p2 != &proc0) {
778 		uid_t uid = kauth_cred_getuid(p2->p_cred);
779 		int count = chglwpcnt(uid, 1);
780 		if (__predict_false(count >
781 		    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
782 			if (kauth_authorize_process(l1->l_cred,
783 			    KAUTH_PROCESS_RLIMIT, p2,
784 			    KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
785 			    &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
786 			    != 0) {
787 				(void)chglwpcnt(uid, -1);
788 				mutex_exit(p2->p_lock);
789 				return EAGAIN;
790 			}
791 		}
792 	}
793 
794 	/*
795 	 * First off, reap any detached LWP waiting to be collected.
796 	 * We can re-use its LWP structure and turnstile.
797 	 */
798 	if ((l2 = p2->p_zomblwp) != NULL) {
799 		p2->p_zomblwp = NULL;
800 		lwp_free(l2, true, false);
801 		/* p2 now unlocked by lwp_free() */
802 		KASSERT(l2->l_ts != NULL);
803 		KASSERT(l2->l_inheritedprio == -1);
804 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
805 		memset(&l2->l_startzero, 0, sizeof(*l2) -
806 		    offsetof(lwp_t, l_startzero));
807 	} else {
808 		mutex_exit(p2->p_lock);
809 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
810 		memset(&l2->l_startzero, 0, sizeof(*l2) -
811 		    offsetof(lwp_t, l_startzero));
812 		SLIST_INIT(&l2->l_pi_lenders);
813 	}
814 
815 	/*
816 	 * Because of lockless lookup via pid_table, the LWP can be locked
817 	 * and inspected briefly even after it's freed, so a few fields are
818 	 * kept stable.
819 	 */
820 	KASSERT(l2->l_stat == LSIDL);
821 	KASSERT(l2->l_cpu != NULL);
822 	KASSERT(l2->l_ts != NULL);
823 	KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
824 
825 	l2->l_proc = p2;
826 	l2->l_refcnt = 0;
827 	l2->l_class = sclass;
828 
829 	/*
830 	 * Allocate a process ID for this LWP.  We need to do this now
831 	 * while we can still unwind if it fails.  Because we're marked
832 	 * as LSIDL, no lookups by the ID will succeed.
833 	 *
834 	 * N.B. this will always succeed for the first LWP in a process,
835 	 * because proc_alloc_lwpid() will usurp the slot.  Also note
836 	 * that l2->l_proc MUST be valid so that lookups of the proc
837 	 * will succeed, even if the LWP itself is not visible.
838 	 */
839 	if (__predict_false(proc_alloc_lwpid(p2, l2) == -1)) {
840 		pool_cache_put(lwp_cache, l2);
841 		return EAGAIN;
842 	}
843 
844 	/*
845 	 * If vfork(), we want the LWP to run fast and on the same CPU
846 	 * as its parent, so that it can reuse the VM context and cache
847 	 * footprint on the local CPU.
848 	 */
849 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
850 	l2->l_kpribase = PRI_KERNEL;
851 	l2->l_priority = l1->l_priority;
852 	l2->l_inheritedprio = -1;
853 	l2->l_protectprio = -1;
854 	l2->l_auxprio = -1;
855 	l2->l_flag = 0;
856 	l2->l_pflag = LP_MPSAFE;
857 	TAILQ_INIT(&l2->l_ld_locks);
858 	l2->l_psrefs = 0;
859 	kmsan_lwp_alloc(l2);
860 
861 	/*
862 	 * For vfork, borrow parent's lwpctl context if it exists.
863 	 * This also causes us to return via lwp_userret.
864 	 */
865 	if (flags & LWP_VFORK && l1->l_lwpctl) {
866 		l2->l_lwpctl = l1->l_lwpctl;
867 		l2->l_flag |= LW_LWPCTL;
868 	}
869 
870 	/*
871 	 * If not the first LWP in the process, grab a reference to the
872 	 * descriptor table.
873 	 */
874 	l2->l_fd = p2->p_fd;
875 	if (p2->p_nlwps != 0) {
876 		KASSERT(l1->l_proc == p2);
877 		fd_hold(l2);
878 	} else {
879 		KASSERT(l1->l_proc != p2);
880 	}
881 
882 	if (p2->p_flag & PK_SYSTEM) {
883 		/* Mark it as a system LWP. */
884 		l2->l_flag |= LW_SYSTEM;
885 	}
886 
887 	kdtrace_thread_ctor(NULL, l2);
888 	lwp_initspecific(l2);
889 	sched_lwp_fork(l1, l2);
890 	lwp_update_creds(l2);
891 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
892 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
893 	cv_init(&l2->l_sigcv, "sigwait");
894 	cv_init(&l2->l_waitcv, "vfork");
895 	l2->l_syncobj = &sched_syncobj;
896 	PSREF_DEBUG_INIT_LWP(l2);
897 
898 	if (rnewlwpp != NULL)
899 		*rnewlwpp = l2;
900 
901 	/*
902 	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
903 	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
904 	 */
905 	pcu_save_all(l1);
906 #if PCU_UNIT_COUNT > 0
907 	l2->l_pcu_valid = l1->l_pcu_valid;
908 #endif
909 
910 	uvm_lwp_setuarea(l2, uaddr);
911 	uvm_lwp_fork(l1, l2, stack, stacksize, func, (arg != NULL) ? arg : l2);
912 
913 	mutex_enter(p2->p_lock);
914 	if ((flags & LWP_DETACHED) != 0) {
915 		l2->l_prflag = LPR_DETACHED;
916 		p2->p_ndlwps++;
917 	} else
918 		l2->l_prflag = 0;
919 
920 	if (l1->l_proc == p2) {
921 		/*
922 		 * These flags are set while p_lock is held.  Copy with
923 		 * p_lock held too, so the LWP doesn't sneak into the
924 		 * process without them being set.
925 		 */
926 		l2->l_flag |= (l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE));
927 	} else {
928 		/* fork(): pending core/exit doesn't apply to child. */
929 		l2->l_flag |= (l1->l_flag & LW_WREBOOT);
930 	}
931 
932 	l2->l_sigstk = *sigstk;
933 	l2->l_sigmask = *sigmask;
934 	TAILQ_INIT(&l2->l_sigpend.sp_info);
935 	sigemptyset(&l2->l_sigpend.sp_set);
936 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
937 	p2->p_nlwps++;
938 	p2->p_nrlwps++;
939 
940 	KASSERT(l2->l_affinity == NULL);
941 
942 	/* Inherit the affinity mask. */
943 	if (l1->l_affinity) {
944 		/*
945 		 * Note that we hold the state lock while inheriting
946 		 * the affinity to avoid race with sched_setaffinity().
947 		 */
948 		lwp_lock(l1);
949 		if (l1->l_affinity) {
950 			kcpuset_use(l1->l_affinity);
951 			l2->l_affinity = l1->l_affinity;
952 		}
953 		lwp_unlock(l1);
954 	}
955 
956 	/* This marks the end of the "must be atomic" section. */
957 	mutex_exit(p2->p_lock);
958 
959 	SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
960 
961 	mutex_enter(&proc_lock);
962 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
963 	/* Inherit a processor-set */
964 	l2->l_psid = l1->l_psid;
965 	mutex_exit(&proc_lock);
966 
967 	SYSCALL_TIME_LWP_INIT(l2);
968 
969 	if (p2->p_emul->e_lwp_fork)
970 		(*p2->p_emul->e_lwp_fork)(l1, l2);
971 
972 	return (0);
973 }
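
/*
 * Usage sketch (editorial, with assumed caller-prepared values): a
 * typical in-kernel caller creates the LWP in LSIDL and then makes it
 * runnable with lwp_start() below; "uaddr", "func", "arg", "mask" and
 * "stk" are assumed to have been set up by the caller:
 *
 *	struct lwp *l2;
 *	int error;
 *
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, SCHED_OTHER, &mask, &stk);
 *	if (error == 0)
 *		lwp_start(l2, 0);
 */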
974 
975 /*
976  * Set a new LWP running.  If the process is stopping, then the LWP is
977  * created stopped.
978  */
979 void
980 lwp_start(lwp_t *l, int flags)
981 {
982 	proc_t *p = l->l_proc;
983 
984 	mutex_enter(p->p_lock);
985 	lwp_lock(l);
986 	KASSERT(l->l_stat == LSIDL);
987 	if ((flags & LWP_SUSPENDED) != 0) {
988 		/* It'll suspend itself in lwp_userret(). */
989 		l->l_flag |= LW_WSUSPEND;
990 	}
991 	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
992 		KASSERT(l->l_wchan == NULL);
993 	    	l->l_stat = LSSTOP;
994 		p->p_nrlwps--;
995 		lwp_unlock(l);
996 	} else {
997 		setrunnable(l);
998 		/* LWP now unlocked */
999 	}
1000 	mutex_exit(p->p_lock);
1001 }
1002 
1003 /*
1004  * Called by MD code when a new LWP begins execution.  Must be called
1005  * with the previous LWP locked (so at splsched), or if there is no
1006  * previous LWP, at splsched.
1007  */
1008 void
1009 lwp_startup(struct lwp *prev, struct lwp *new_lwp)
1010 {
1011 	kmutex_t *lock;
1012 
1013 	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
1014 	KASSERT(kpreempt_disabled());
1015 	KASSERT(prev != NULL);
1016 	KASSERT((prev->l_pflag & LP_RUNNING) != 0);
1017 	KASSERT(curcpu()->ci_mtx_count == -2);
1018 
1019 	/*
1020 	 * Immediately mark the previous LWP as no longer running and unlock
1021 	 * (to keep lock wait times as short as possible).  If a zombie, don't
1022 	 * touch after clearing LP_RUNNING as it could be reaped by another
1023 	 * CPU.  Issue a memory barrier to ensure this.
1024 	 */
1025 	lock = prev->l_mutex;
1026 	if (__predict_false(prev->l_stat == LSZOMB)) {
1027 		membar_sync();
1028 	}
1029 	prev->l_pflag &= ~LP_RUNNING;
1030 	mutex_spin_exit(lock);
1031 
1032 	/* Correct spin mutex count after mi_switch(). */
1033 	curcpu()->ci_mtx_count = 0;
1034 
1035 	/* Install new VM context. */
1036 	if (__predict_true(new_lwp->l_proc->p_vmspace)) {
1037 		pmap_activate(new_lwp);
1038 	}
1039 
1040 	/* We remain at IPL_SCHED from mi_switch() - reset it. */
1041 	spl0();
1042 
1043 	LOCKDEBUG_BARRIER(NULL, 0);
1044 	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
1045 
1046 	/* For kthreads, acquire kernel lock if not MPSAFE. */
1047 	if (__predict_false((new_lwp->l_pflag & LP_MPSAFE) == 0)) {
1048 		KERNEL_LOCK(1, new_lwp);
1049 	}
1050 }
1051 
1052 /*
1053  * Exit an LWP.
1054  *
1055  * *** WARNING *** This can be called with (l != curlwp) in error paths.
1056  */
1057 void
1058 lwp_exit(struct lwp *l)
1059 {
1060 	struct proc *p = l->l_proc;
1061 	struct lwp *l2;
1062 	bool current;
1063 
1064 	current = (l == curlwp);
1065 
1066 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
1067 	KASSERT(p == curproc);
1068 
1069 	SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
1070 
1071 	/* Verify that we hold no locks; for DIAGNOSTIC check kernel_lock. */
1072 	LOCKDEBUG_BARRIER(NULL, 0);
1073 	KASSERTMSG(curcpu()->ci_biglock_count == 0, "kernel_lock leaked");
1074 
1075 	/*
1076 	 * If we are the last live LWP in a process, we need to exit the
1077 	 * entire process.  We do so with an exit status of zero, because
1078 	 * it's a "controlled" exit, and because that's what Solaris does.
1079 	 *
1080 	 * We are not quite a zombie yet, but for accounting purposes we
1081 	 * must increment the count of zombies here.
1082 	 *
1083 	 * Note: the last LWP's specificdata will be deleted here.
1084 	 */
1085 	mutex_enter(p->p_lock);
1086 	if (p->p_nlwps - p->p_nzlwps == 1) {
1087 		KASSERT(current == true);
1088 		KASSERT(p != &proc0);
1089 		exit1(l, 0, 0);
1090 		/* NOTREACHED */
1091 	}
1092 	p->p_nzlwps++;
1093 
1094 	/*
1095 	 * Perform any required thread cleanup.  Do this early so
1096 	 * anyone wanting to look us up with lwp_getref_lwpid() will
1097 	 * fail to find us before we become a zombie.
1098 	 *
1099 	 * N.B. this will unlock p->p_lock on our behalf.
1100 	 */
1101 	lwp_thread_cleanup(l);
1102 
1103 	if (p->p_emul->e_lwp_exit)
1104 		(*p->p_emul->e_lwp_exit)(l);
1105 
1106 	/* Drop filedesc reference. */
1107 	fd_free();
1108 
1109 	/* Release fstrans private data. */
1110 	fstrans_lwp_dtor(l);
1111 
1112 	/* Delete the specificdata while it's still safe to sleep. */
1113 	lwp_finispecific(l);
1114 
1115 	/*
1116 	 * Release our cached credentials.
1117 	 */
1118 	kauth_cred_free(l->l_cred);
1119 	callout_destroy(&l->l_timeout_ch);
1120 
1121 	/*
1122 	 * If traced, report LWP exit event to the debugger.
1123 	 *
1124 	 * Remove the LWP from the global list.
1125 	 * Free its LID from the PID namespace if needed.
1126 	 */
1127 	mutex_enter(&proc_lock);
1128 
1129 	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_EXIT)) ==
1130 	    (PSL_TRACED|PSL_TRACELWP_EXIT)) {
1131 		mutex_enter(p->p_lock);
1132 		if (ISSET(p->p_sflag, PS_WEXIT)) {
1133 			mutex_exit(p->p_lock);
1134 			/*
1135 			 * We are exiting, bail out without informing parent
1136 			 * about a terminating LWP as it would deadlock.
1137 			 */
1138 		} else {
1139 			eventswitch(TRAP_LWP, PTRACE_LWP_EXIT, l->l_lid);
1140 			mutex_enter(&proc_lock);
1141 		}
1142 	}
1143 
1144 	LIST_REMOVE(l, l_list);
1145 	mutex_exit(&proc_lock);
1146 
1147 	/*
1148 	 * Get rid of all references to the LWP that others (e.g. procfs)
1149 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
1150 	 * mark it waiting for collection in the proc structure.  Note that
1151 	 * before we can do that, we need to free any other dead, detached
1152 	 * LWP waiting to meet its maker.
1153 	 *
1154 	 * All conditions need to be observed under the same hold of p_lock,
1155 	 * because if the lock is dropped any of them can change.
1156 	 */
1157 	mutex_enter(p->p_lock);
1158 	for (;;) {
1159 		if (lwp_drainrefs(l))
1160 			continue;
1161 		if ((l->l_prflag & LPR_DETACHED) != 0) {
1162 			if ((l2 = p->p_zomblwp) != NULL) {
1163 				p->p_zomblwp = NULL;
1164 				lwp_free(l2, false, false);
1165 				/* proc now unlocked */
1166 				mutex_enter(p->p_lock);
1167 				continue;
1168 			}
1169 			p->p_zomblwp = l;
1170 		}
1171 		break;
1172 	}
1173 
1174 	/*
1175 	 * If we find a pending signal for the process and we have been
1176 	 * asked to check for signals, then we lose: arrange to have
1177 	 * all other LWPs in the process check for signals.
1178 	 */
1179 	if ((l->l_flag & LW_PENDSIG) != 0 &&
1180 	    firstsig(&p->p_sigpend.sp_set) != 0) {
1181 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1182 			lwp_lock(l2);
1183 			signotify(l2);
1184 			lwp_unlock(l2);
1185 		}
1186 	}
1187 
1188 	/*
1189 	 * Release any PCU resources before becoming a zombie.
1190 	 */
1191 	pcu_discard_all(l);
1192 
1193 	lwp_lock(l);
1194 	l->l_stat = LSZOMB;
1195 	if (l->l_name != NULL) {
1196 		strcpy(l->l_name, "(zombie)");
1197 	}
1198 	lwp_unlock(l);
1199 	p->p_nrlwps--;
1200 	cv_broadcast(&p->p_lwpcv);
1201 	if (l->l_lwpctl != NULL)
1202 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
1203 	mutex_exit(p->p_lock);
1204 
1205 	/*
1206 	 * We can no longer block.  At this point, lwp_free() may already
1207 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
1208 	 *
1209 	 * Free MD LWP resources.
1210 	 */
1211 	cpu_lwp_free(l, 0);
1212 
1213 	if (current) {
1214 		/* Switch away into oblivion. */
1215 		lwp_lock(l);
1216 		spc_lock(l->l_cpu);
1217 		mi_switch(l);
1218 		panic("lwp_exit");
1219 	}
1220 }
1221 
1222 /*
1223  * Free a dead LWP's remaining resources.
1224  *
1225  * XXXLWP limits.
1226  */
1227 void
1228 lwp_free(struct lwp *l, bool recycle, bool last)
1229 {
1230 	struct proc *p = l->l_proc;
1231 	struct rusage *ru;
1232 	ksiginfoq_t kq;
1233 
1234 	KASSERT(l != curlwp);
1235 	KASSERT(last || mutex_owned(p->p_lock));
1236 
1237 	/*
1238 	 * We use the process credentials instead of the lwp credentials here
1239 	 * because the lwp credentials may be cached (just after a setuid call)
1240 	 * and we don't want to pay for syncing, since the lwp is going away
1241 	 * anyway.
1242 	 */
1243 	if (p != &proc0 && p->p_nlwps != 1)
1244 		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
1245 
1246 	/*
1247 	 * In the unlikely event that the LWP is still on the CPU,
1248 	 * then spin until it has switched away.
1249 	 */
1250 	membar_consumer();
1251 	while (__predict_false((l->l_pflag & LP_RUNNING) != 0)) {
1252 		SPINLOCK_BACKOFF_HOOK;
1253 	}
1254 
1255 	/*
1256 	 * Now that the LWP's known off the CPU, reset its state back to
1257 	 * LSIDL, which defeats anything that might have gotten a hold on
1258 	 * the LWP via pid_table before the ID was freed.  It's important
1259 	 * to do this with both the LWP locked and p_lock held.
1260 	 *
1261 	 * Also reset the CPU and lock pointer back to curcpu(), since the
1262 	 * LWP will in all likelihood be cached with the current CPU in
1263 	 * lwp_cache when we free it and later allocated from there again
1264 	 * (avoid incidental lock contention).
1265 	 */
1266 	lwp_lock(l);
1267 	l->l_stat = LSIDL;
1268 	l->l_cpu = curcpu();
1269 	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_lwplock);
1270 
1271 	/*
1272 	 * If this was not the last LWP in the process, then adjust counters
1273 	 * and unlock.  This is done differently for the last LWP in exit1().
1274 	 */
1275 	if (!last) {
1276 		/*
1277 		 * Add the LWP's run time to the process' base value.
1278 		 * This needs to coincide with coming off p_lwps.
1279 		 */
1280 		bintime_add(&p->p_rtime, &l->l_rtime);
1281 		p->p_pctcpu += l->l_pctcpu;
1282 		ru = &p->p_stats->p_ru;
1283 		ruadd(ru, &l->l_ru);
1284 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
1285 		ru->ru_nivcsw += l->l_nivcsw;
1286 		LIST_REMOVE(l, l_sibling);
1287 		p->p_nlwps--;
1288 		p->p_nzlwps--;
1289 		if ((l->l_prflag & LPR_DETACHED) != 0)
1290 			p->p_ndlwps--;
1291 
1292 		/*
1293 		 * Have any LWPs sleeping in lwp_wait() recheck for
1294 		 * deadlock.
1295 		 */
1296 		cv_broadcast(&p->p_lwpcv);
1297 		mutex_exit(p->p_lock);
1298 
1299 		/* Free the LWP ID. */
1300 		mutex_enter(&proc_lock);
1301 		proc_free_lwpid(p, l->l_lid);
1302 		mutex_exit(&proc_lock);
1303 	}
1304 
1305 	/*
1306 	 * Destroy the LWP's remaining signal information.
1307 	 */
1308 	ksiginfo_queue_init(&kq);
1309 	sigclear(&l->l_sigpend, NULL, &kq);
1310 	ksiginfo_queue_drain(&kq);
1311 	cv_destroy(&l->l_sigcv);
1312 	cv_destroy(&l->l_waitcv);
1313 
1314 	/*
1315 	 * Free lwpctl structure and affinity.
1316 	 */
1317 	if (l->l_lwpctl) {
1318 		lwp_ctl_free(l);
1319 	}
1320 	if (l->l_affinity) {
1321 		kcpuset_unuse(l->l_affinity, NULL);
1322 		l->l_affinity = NULL;
1323 	}
1324 
1325 	/*
1326 	 * Free remaining data structures and the LWP itself unless the
1327 	 * caller wants to recycle.
1328 	 */
1329 	if (l->l_name != NULL)
1330 		kmem_free(l->l_name, MAXCOMLEN);
1331 
1332 	kmsan_lwp_free(l);
1333 	kcov_lwp_free(l);
1334 	cpu_lwp_free2(l);
1335 	uvm_lwp_exit(l);
1336 
1337 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1338 	KASSERT(l->l_inheritedprio == -1);
1339 	KASSERT(l->l_blcnt == 0);
1340 	kdtrace_thread_dtor(NULL, l);
1341 	if (!recycle)
1342 		pool_cache_put(lwp_cache, l);
1343 }
1344 
1345 /*
1346  * Migrate the LWP to another CPU.  Unlocks the LWP.
1347  */
1348 void
1349 lwp_migrate(lwp_t *l, struct cpu_info *tci)
1350 {
1351 	struct schedstate_percpu *tspc;
1352 	int lstat = l->l_stat;
1353 
1354 	KASSERT(lwp_locked(l, NULL));
1355 	KASSERT(tci != NULL);
1356 
1357 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
1358 	if ((l->l_pflag & LP_RUNNING) != 0) {
1359 		lstat = LSONPROC;
1360 	}
1361 
1362 	/*
1363 	 * The destination CPU could have changed while a previous
1364 	 * migration was still in progress.
1365 	 */
1366 	if (l->l_target_cpu != NULL) {
1367 		l->l_target_cpu = tci;
1368 		lwp_unlock(l);
1369 		return;
1370 	}
1371 
1372 	/* Nothing to do if trying to migrate to the same CPU */
1373 	if (l->l_cpu == tci) {
1374 		lwp_unlock(l);
1375 		return;
1376 	}
1377 
1378 	KASSERT(l->l_target_cpu == NULL);
1379 	tspc = &tci->ci_schedstate;
1380 	switch (lstat) {
1381 	case LSRUN:
1382 		l->l_target_cpu = tci;
1383 		break;
1384 	case LSSLEEP:
1385 		l->l_cpu = tci;
1386 		break;
1387 	case LSIDL:
1388 	case LSSTOP:
1389 	case LSSUSPENDED:
1390 		l->l_cpu = tci;
1391 		if (l->l_wchan == NULL) {
1392 			lwp_unlock_to(l, tspc->spc_lwplock);
1393 			return;
1394 		}
1395 		break;
1396 	case LSONPROC:
1397 		l->l_target_cpu = tci;
1398 		spc_lock(l->l_cpu);
1399 		sched_resched_cpu(l->l_cpu, PRI_USER_RT, true);
1400 		/* spc now unlocked */
1401 		break;
1402 	}
1403 	lwp_unlock(l);
1404 }
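
/*
 * Usage sketch (editorial): callers lock the LWP first, and
 * lwp_migrate() always releases the lock on their behalf:
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, tci);		the LWP lock is released on return
 */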
1405 
1406 #define	lwp_find_exclude(l)					\
1407 	((l)->l_stat == LSIDL || (l)->l_stat == LSZOMB)
1408 
1409 /*
1410  * Find the LWP in the process.  Arguments may be zero, in which case
1411  * the calling process and first LWP in the list will be used.
1412  * On success - returns proc locked.
1413  *
1414  * => pid == 0 -> look in curproc.
1415  * => pid == -1 -> match any proc.
1416  * => otherwise look up the proc.
1417  *
1418  * => lid == 0 -> first LWP in the proc
1419  * => otherwise specific LWP
1420  */
1421 struct lwp *
1422 lwp_find2(pid_t pid, lwpid_t lid)
1423 {
1424 	proc_t *p;
1425 	lwp_t *l;
1426 
1427 	/* First LWP of specified proc. */
1428 	if (lid == 0) {
1429 		switch (pid) {
1430 		case -1:
1431 			/* No lookup keys. */
1432 			return NULL;
1433 		case 0:
1434 			p = curproc;
1435 			mutex_enter(p->p_lock);
1436 			break;
1437 		default:
1438 			mutex_enter(&proc_lock);
1439 			p = proc_find(pid);
1440 			if (__predict_false(p == NULL)) {
1441 				mutex_exit(&proc_lock);
1442 				return NULL;
1443 			}
1444 			mutex_enter(p->p_lock);
1445 			mutex_exit(&proc_lock);
1446 			break;
1447 		}
1448 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1449 			if (__predict_true(!lwp_find_exclude(l)))
1450 				break;
1451 		}
1452 		goto out;
1453 	}
1454 
1455 	l = proc_find_lwp_acquire_proc(lid, &p);
1456 	if (l == NULL)
1457 		return NULL;
1458 	KASSERT(p != NULL);
1459 	KASSERT(mutex_owned(p->p_lock));
1460 
1461 	if (__predict_false(lwp_find_exclude(l))) {
1462 		l = NULL;
1463 		goto out;
1464 	}
1465 
1466 	/* Apply proc filter, if applicable. */
1467 	switch (pid) {
1468 	case -1:
1469 		/* Match anything. */
1470 		break;
1471 	case 0:
1472 		if (p != curproc)
1473 			l = NULL;
1474 		break;
1475 	default:
1476 		if (p->p_pid != pid)
1477 			l = NULL;
1478 		break;
1479 	}
1480 
1481  out:
1482 	if (__predict_false(l == NULL)) {
1483 		mutex_exit(p->p_lock);
1484 	}
1485 	return l;
1486 }
1487 
1488 /*
1489  * Look up a live LWP within the specified process.
1490  *
1491  * Must be called with p->p_lock held (as it looks at the radix tree,
1492  * and also wants to exclude idle and zombie LWPs).
1493  */
1494 struct lwp *
1495 lwp_find(struct proc *p, lwpid_t id)
1496 {
1497 	struct lwp *l;
1498 
1499 	KASSERT(mutex_owned(p->p_lock));
1500 
1501 	l = proc_find_lwp(p, id);
1502 	KASSERT(l == NULL || l->l_lid == id);
1503 
1504 	/*
1505 	 * No need to lock - all of these conditions will
1506 	 * be visible with the process level mutex held.
1507 	 */
1508 	if (__predict_false(l != NULL && lwp_find_exclude(l)))
1509 		l = NULL;
1510 
1511 	return l;
1512 }
1513 
1514 /*
1515  * Update an LWP's cached credentials to mirror the process' master copy.
1516  *
1517  * This happens early in the syscall path, on user trap, and on LWP
1518  * creation.  A long-running LWP can also voluntarily choose to update
1519  * its credentials by calling this routine.  This may be called from
1520  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1521  */
1522 void
1523 lwp_update_creds(struct lwp *l)
1524 {
1525 	kauth_cred_t oc;
1526 	struct proc *p;
1527 
1528 	p = l->l_proc;
1529 	oc = l->l_cred;
1530 
1531 	mutex_enter(p->p_lock);
1532 	kauth_cred_hold(p->p_cred);
1533 	l->l_cred = p->p_cred;
1534 	l->l_prflag &= ~LPR_CRMOD;
1535 	mutex_exit(p->p_lock);
1536 	if (oc != NULL)
1537 		kauth_cred_free(oc);
1538 }
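
/*
 * Editorial sketch (an assumption about the macro's shape; the comment
 * above only documents the l_cred != p_cred check): LWP_CACHE_CREDS()
 * amounts to something like:
 *
 *	if (__predict_false(l->l_cred != p->p_cred))
 *		lwp_update_creds(l);
 */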
1539 
1540 /*
1541  * Verify that an LWP is locked, and optionally verify that the lock matches
1542  * one we specify.
1543  */
1544 int
1545 lwp_locked(struct lwp *l, kmutex_t *mtx)
1546 {
1547 	kmutex_t *cur = l->l_mutex;
1548 
1549 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1550 }
1551 
1552 /*
1553  * Lend a new mutex to an LWP.  The old mutex must be held.
1554  */
1555 kmutex_t *
1556 lwp_setlock(struct lwp *l, kmutex_t *mtx)
1557 {
1558 	kmutex_t *oldmtx = l->l_mutex;
1559 
1560 	KASSERT(mutex_owned(oldmtx));
1561 
1562 	membar_exit();
1563 	l->l_mutex = mtx;
1564 	return oldmtx;
1565 }
1566 
1567 /*
1568  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
1569  * must be held.
1570  */
1571 void
1572 lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
1573 {
1574 	kmutex_t *old;
1575 
1576 	KASSERT(lwp_locked(l, NULL));
1577 
1578 	old = l->l_mutex;
1579 	membar_exit();
1580 	l->l_mutex = mtx;
1581 	mutex_spin_exit(old);
1582 }
1583 
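/*
 * Try to acquire the LWP's general lock without sleeping, retrying if
 * the lock pointer changes while the lock is being taken.  Returns
 * non-zero on success, and zero if the lock is held by someone else.
 */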
1584 int
1585 lwp_trylock(struct lwp *l)
1586 {
1587 	kmutex_t *old;
1588 
1589 	for (;;) {
1590 		if (!mutex_tryenter(old = l->l_mutex))
1591 			return 0;
1592 		if (__predict_true(l->l_mutex == old))
1593 			return 1;
1594 		mutex_spin_exit(old);
1595 	}
1596 }
1597 
1598 void
1599 lwp_unsleep(lwp_t *l, bool unlock)
1600 {
1601 
1602 	KASSERT(mutex_owned(l->l_mutex));
1603 	(*l->l_syncobj->sobj_unsleep)(l, unlock);
1604 }
1605 
1606 /*
1607  * Handle exceptions for mi_userret().  Called if a member of LW_USERRET is
1608  * set.
1609  */
1610 void
1611 lwp_userret(struct lwp *l)
1612 {
1613 	struct proc *p;
1614 	int sig;
1615 
1616 	KASSERT(l == curlwp);
1617 	KASSERT(l->l_stat == LSONPROC);
1618 	p = l->l_proc;
1619 
1620 	/*
1621 	 * It is safe to do this read unlocked on an MP system.
1622 	 */
1623 	while ((l->l_flag & LW_USERRET) != 0) {
1624 		/*
1625 		 * Process pending signals first, unless the process
1626 		 * is dumping core or exiting, where we will instead
1627 		 * enter the LW_WSUSPEND case below.
1628 		 */
1629 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1630 		    LW_PENDSIG) {
1631 			mutex_enter(p->p_lock);
1632 			while ((sig = issignal(l)) != 0)
1633 				postsig(sig);
1634 			mutex_exit(p->p_lock);
1635 		}
1636 
1637 		/*
1638 		 * Core-dump or suspend pending.
1639 		 *
1640 		 * In case of core dump, suspend ourselves, so that the kernel
1641 		 * stack and therefore the userland registers saved in the
1642 		 * trapframe are around for coredump() to write them out.
1643 		 * We also need to save any PCU resources that we have so that
1644 		 * they are accessible for coredump().  We issue a wakeup on
1645 		 * p->p_lwpcv so that sigexit() will write the core file out
1646 		 * once all other LWPs are suspended.
1647 		 */
1648 		if ((l->l_flag & LW_WSUSPEND) != 0) {
1649 			pcu_save_all(l);
1650 			mutex_enter(p->p_lock);
1651 			p->p_nrlwps--;
1652 			cv_broadcast(&p->p_lwpcv);
1653 			lwp_lock(l);
1654 			l->l_stat = LSSUSPENDED;
1655 			lwp_unlock(l);
1656 			mutex_exit(p->p_lock);
1657 			lwp_lock(l);
1658 			spc_lock(l->l_cpu);
1659 			mi_switch(l);
1660 		}
1661 
1662 		/* Process is exiting. */
1663 		if ((l->l_flag & LW_WEXIT) != 0) {
1664 			lwp_exit(l);
1665 			KASSERT(0);
1666 			/* NOTREACHED */
1667 		}
1668 
1669 		/* update lwpctl processor (for vfork child_return) */
1670 		if (l->l_flag & LW_LWPCTL) {
1671 			lwp_lock(l);
1672 			KASSERT(kpreempt_disabled());
1673 			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
1674 			l->l_lwpctl->lc_pctr++;
1675 			l->l_flag &= ~LW_LWPCTL;
1676 			lwp_unlock(l);
1677 		}
1678 	}
1679 }
1680 
1681 /*
1682  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1683  */
1684 void
1685 lwp_need_userret(struct lwp *l)
1686 {
1687 
1688 	KASSERT(!cpu_intr_p());
1689 	KASSERT(lwp_locked(l, NULL));
1690 
1691 	/*
1692 	 * If the LWP is in any state other than LSONPROC, we know that it
1693 	 * is executing in-kernel and will hit userret() on the way out.
1694 	 *
1695 	 * If the LWP is curlwp, then we know we'll be back out to userspace
1696 	 * soon (can't be called from a hardware interrupt here).
1697 	 *
1698 	 * Otherwise, we can't be sure what the LWP is doing, so first make
1699 	 * sure the update to l_flag will be globally visible, and then
1700 	 * force the LWP to take a trip through trap() where it will do
1701 	 * userret().
1702 	 */
1703 	if (l->l_stat == LSONPROC && l != curlwp) {
1704 		membar_producer();
1705 		cpu_signotify(l);
1706 	}
1707 }
1708 
1709 /*
1710  * Add one reference to an LWP.  This will prevent the LWP from
1711  * exiting, thus keeping the lwp structure and PCB around to inspect.
1712  */
1713 void
1714 lwp_addref(struct lwp *l)
1715 {
1716 	KASSERT(mutex_owned(l->l_proc->p_lock));
1717 	KASSERT(l->l_stat != LSZOMB);
1718 	l->l_refcnt++;
1719 }
1720 
1721 /*
1722  * Remove one reference to an LWP.  If this is the last reference,
1723  * then we must finalize the LWP's death.
1724  */
1725 void
1726 lwp_delref(struct lwp *l)
1727 {
1728 	struct proc *p = l->l_proc;
1729 
1730 	mutex_enter(p->p_lock);
1731 	lwp_delref2(l);
1732 	mutex_exit(p->p_lock);
1733 }
1734 
1735 /*
1736  * Remove one reference to an LWP.  If this is the last reference,
1737  * then we must finalize the LWP's death.  The proc mutex is held
1738  * on entry.
1739  */
1740 void
1741 lwp_delref2(struct lwp *l)
1742 {
1743 	struct proc *p = l->l_proc;
1744 
1745 	KASSERT(mutex_owned(p->p_lock));
1746 	KASSERT(l->l_stat != LSZOMB);
1747 	KASSERT(l->l_refcnt > 0);
1748 
1749 	if (--l->l_refcnt == 0)
1750 		cv_broadcast(&p->p_lwpcv);
1751 }
1752 
1753 /*
1754  * Drain all references to the current LWP.  Returns true if
1755  * we blocked.
1756  */
1757 bool
1758 lwp_drainrefs(struct lwp *l)
1759 {
1760 	struct proc *p = l->l_proc;
1761 	bool rv = false;
1762 
1763 	KASSERT(mutex_owned(p->p_lock));
1764 
1765 	l->l_prflag |= LPR_DRAINING;
1766 
1767 	while (l->l_refcnt > 0) {
1768 		rv = true;
1769 		cv_wait(&p->p_lwpcv, p->p_lock);
1770 	}
1771 	return rv;
1772 }
1773 
1774 /*
1775  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
1776  * be held.
1777  */
1778 bool
1779 lwp_alive(lwp_t *l)
1780 {
1781 
1782 	KASSERT(mutex_owned(l->l_proc->p_lock));
1783 
1784 	switch (l->l_stat) {
1785 	case LSSLEEP:
1786 	case LSRUN:
1787 	case LSONPROC:
1788 	case LSSTOP:
1789 	case LSSUSPENDED:
1790 		return true;
1791 	default:
1792 		return false;
1793 	}
1794 }
1795 
1796 /*
1797  * Return first live LWP in the process.
1798  */
1799 lwp_t *
1800 lwp_find_first(proc_t *p)
1801 {
1802 	lwp_t *l;
1803 
1804 	KASSERT(mutex_owned(p->p_lock));
1805 
1806 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1807 		if (lwp_alive(l)) {
1808 			return l;
1809 		}
1810 	}
1811 
1812 	return NULL;
1813 }
1814 
1815 /*
1816  * Allocate a new lwpctl structure for a user LWP.
1817  */
1818 int
1819 lwp_ctl_alloc(vaddr_t *uaddr)
1820 {
1821 	lcproc_t *lp;
1822 	u_int bit, i, offset;
1823 	struct uvm_object *uao;
1824 	int error;
1825 	lcpage_t *lcp;
1826 	proc_t *p;
1827 	lwp_t *l;
1828 
1829 	l = curlwp;
1830 	p = l->l_proc;
1831 
1832 	/* don't allow a vforked process to create lwp ctls */
1833 	if (p->p_lflag & PL_PPWAIT)
1834 		return EBUSY;
1835 
1836 	if (l->l_lcpage != NULL) {
1837 		lcp = l->l_lcpage;
1838 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1839 		return 0;
1840 	}
1841 
1842 	/* First time around, allocate header structure for the process. */
1843 	if ((lp = p->p_lwpctl) == NULL) {
1844 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1845 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1846 		lp->lp_uao = NULL;
1847 		TAILQ_INIT(&lp->lp_pages);
1848 		mutex_enter(p->p_lock);
1849 		if (p->p_lwpctl == NULL) {
1850 			p->p_lwpctl = lp;
1851 			mutex_exit(p->p_lock);
1852 		} else {
1853 			mutex_exit(p->p_lock);
1854 			mutex_destroy(&lp->lp_lock);
1855 			kmem_free(lp, sizeof(*lp));
1856 			lp = p->p_lwpctl;
1857 		}
1858 	}
1859 
1860  	/*
1861  	 * Set up an anonymous memory region to hold the shared pages.
1862  	 * Map them into the process' address space.  The user vmspace
1863  	 * gets the first reference on the UAO.
1864  	 */
1865 	mutex_enter(&lp->lp_lock);
1866 	if (lp->lp_uao == NULL) {
1867 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1868 		lp->lp_cur = 0;
1869 		lp->lp_max = LWPCTL_UAREA_SZ;
1870 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1871 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ,
1872 		     p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
1873 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1874 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1875 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1876 		if (error != 0) {
1877 			uao_detach(lp->lp_uao);
1878 			lp->lp_uao = NULL;
1879 			mutex_exit(&lp->lp_lock);
1880 			return error;
1881 		}
1882 	}
1883 
1884 	/* Get a free block and allocate for this LWP. */
1885 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1886 		if (lcp->lcp_nfree != 0)
1887 			break;
1888 	}
1889 	if (lcp == NULL) {
1890 		/* Nothing available - try to set up a free page. */
1891 		if (lp->lp_cur == lp->lp_max) {
1892 			mutex_exit(&lp->lp_lock);
1893 			return ENOMEM;
1894 		}
1895 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1896 
1897 		/*
1898 		 * Wire the next page down in kernel space.  Since this
1899 		 * is a new mapping, we must add a reference.
1900 		 */
1901 		uao = lp->lp_uao;
1902 		(*uao->pgops->pgo_reference)(uao);
1903 		lcp->lcp_kaddr = vm_map_min(kernel_map);
1904 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1905 		    uao, lp->lp_cur, PAGE_SIZE,
1906 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1907 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1908 		if (error != 0) {
1909 			mutex_exit(&lp->lp_lock);
1910 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1911 			(*uao->pgops->pgo_detach)(uao);
1912 			return error;
1913 		}
1914 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1915 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1916 		if (error != 0) {
1917 			mutex_exit(&lp->lp_lock);
1918 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
1919 			    lcp->lcp_kaddr + PAGE_SIZE);
1920 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1921 			return error;
1922 		}
1923 		/* Prepare the page descriptor and link into the list. */
1924 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1925 		lp->lp_cur += PAGE_SIZE;
1926 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
1927 		lcp->lcp_rotor = 0;
1928 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1929 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1930 	}
1931 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1932 		if (++i >= LWPCTL_BITMAP_ENTRIES)
1933 			i = 0;
1934 	}
1935 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
1936 	lcp->lcp_bitmap[i] ^= (1U << bit);
1937 	lcp->lcp_rotor = i;
1938 	lcp->lcp_nfree--;
1939 	l->l_lcpage = lcp;
1940 	offset = (i << 5) + bit;
1941 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1942 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1943 	mutex_exit(&lp->lp_lock);
1944 
1945 	KPREEMPT_DISABLE(l);
1946 	l->l_lwpctl->lc_curcpu = (int)cpu_index(curcpu());
1947 	KPREEMPT_ENABLE(l);
1948 
1949 	return 0;
1950 }
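
/*
 * Editorial note on the bitmap arithmetic above: each lcp_bitmap[] word
 * tracks 32 lwpctl slots, so a slot's index is (word << 5) + bit, and
 * lwp_ctl_free() below recovers the word with (offset >> 5) and the
 * bit with (offset & 31).
 */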
1951 
1952 /*
1953  * Free an lwpctl structure back to the per-process list.
1954  */
1955 void
1956 lwp_ctl_free(lwp_t *l)
1957 {
1958 	struct proc *p = l->l_proc;
1959 	lcproc_t *lp;
1960 	lcpage_t *lcp;
1961 	u_int map, offset;
1962 
1963 	/* don't free a lwp context we borrowed for vfork */
1964 	if (p->p_lflag & PL_PPWAIT) {
1965 		l->l_lwpctl = NULL;
1966 		return;
1967 	}
1968 
1969 	lp = p->p_lwpctl;
1970 	KASSERT(lp != NULL);
1971 
1972 	lcp = l->l_lcpage;
1973 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1974 	KASSERT(offset < LWPCTL_PER_PAGE);
1975 
1976 	mutex_enter(&lp->lp_lock);
1977 	lcp->lcp_nfree++;
1978 	map = offset >> 5;
1979 	lcp->lcp_bitmap[map] |= (1U << (offset & 31));
1980 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1981 		lcp->lcp_rotor = map;
1982 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1983 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1984 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1985 	}
1986 	mutex_exit(&lp->lp_lock);
1987 }
1988 
1989 /*
1990  * Process is exiting; tear down lwpctl state.  This can only be safely
1991  * called by the last LWP in the process.
1992  */
1993 void
1994 lwp_ctl_exit(void)
1995 {
1996 	lcpage_t *lcp, *next;
1997 	lcproc_t *lp;
1998 	proc_t *p;
1999 	lwp_t *l;
2000 
2001 	l = curlwp;
2002 	l->l_lwpctl = NULL;
2003 	l->l_lcpage = NULL;
2004 	p = l->l_proc;
2005 	lp = p->p_lwpctl;
2006 
2007 	KASSERT(lp != NULL);
2008 	KASSERT(p->p_nlwps == 1);
2009 
2010 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
2011 		next = TAILQ_NEXT(lcp, lcp_chain);
2012 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
2013 		    lcp->lcp_kaddr + PAGE_SIZE);
2014 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
2015 	}
2016 
2017 	if (lp->lp_uao != NULL) {
2018 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
2019 		    lp->lp_uva + LWPCTL_UAREA_SZ);
2020 	}
2021 
2022 	mutex_destroy(&lp->lp_lock);
2023 	kmem_free(lp, sizeof(*lp));
2024 	p->p_lwpctl = NULL;
2025 }
2026 
2027 /*
2028  * Return the current LWP's "preemption counter".  Used to detect
2029  * preemption across operations that can tolerate preemption without
2030  * crashing, but which may generate incorrect results if preempted.
2031  */
2032 uint64_t
2033 lwp_pctr(void)
2034 {
2035 
2036 	return curlwp->l_ncsw;
2037 }
2038 
2039 /*
2040  * Set an LWP's private data pointer.
2041  */
2042 int
2043 lwp_setprivate(struct lwp *l, void *ptr)
2044 {
2045 	int error = 0;
2046 
2047 	l->l_private = ptr;
2048 #ifdef __HAVE_CPU_LWP_SETPRIVATE
2049 	error = cpu_lwp_setprivate(l, ptr);
2050 #endif
2051 	return error;
2052 }
2053 
2054 /*
2055  * Perform any thread-related cleanup on LWP exit.
2056  * N.B. l->l_proc->p_lock must be HELD on entry but will
2057  * be released before returning!
2058  */
2059 void
2060 lwp_thread_cleanup(struct lwp *l)
2061 {
2062 
2063 	KASSERT(mutex_owned(l->l_proc->p_lock));
2064 	mutex_exit(l->l_proc->p_lock);
2065 
2066 	/*
2067 	 * If the LWP has robust futexes, release them all
2068 	 * now.
2069 	 */
2070 	if (__predict_false(l->l_robust_head != 0)) {
2071 		futex_release_all_lwp(l);
2072 	}
2073 }
2074 
2075 #if defined(DDB)
2076 #include <machine/pcb.h>
2077 
2078 void
2079 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2080 {
2081 	lwp_t *l;
2082 
2083 	LIST_FOREACH(l, &alllwp, l_list) {
2084 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
2085 
2086 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
2087 			continue;
2088 		}
2089 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
2090 		    (void *)addr, (void *)stack,
2091 		    (size_t)(addr - stack), l);
2092 	}
2093 }
2094 #endif /* defined(DDB) */
2095