xref: /netbsd-src/sys/kern/kern_lwp.c (revision b1c86f5f087524e68db12794ee9c3e3da1ab17a0)
1 /*	$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams, and Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	Lightweight processes (LWPs) are the basic unit or thread of
36  *	execution within the kernel.  The core state of an LWP is described
37  *	by "struct lwp", also known as lwp_t.
38  *
39  *	Each LWP is contained within a process (described by "struct proc").
40  *	Every process contains at least one LWP, but may contain more.  The
41  *	process describes attributes shared among all of its LWPs such as a
42  *	private address space, global execution state (stopped, active,
43  *	zombie, ...), signal disposition and so on.  On a multiprocessor
44  *	machine, multiple LWPs may be executing concurrently in the kernel.
45  *
46  * Execution states
47  *
48  *	At any given time, an LWP has overall state that is described by
49  *	lwp::l_stat.  The states are broken into two sets below.  The first
50  *	set is guaranteed to represent the absolute, current state of the
51  *	LWP:
52  *
53  *	LSONPROC
54  *
55  *		On processor: the LWP is executing on a CPU, either in the
56  *		kernel or in user space.
57  *
58  *	LSRUN
59  *
60  *		Runnable: the LWP is parked on a run queue, and may soon be
61  *		chosen to run by an idle processor, or by a processor that
62  *		has been asked to preempt a currently running but lower
63  *		priority LWP.
64  *
65  *	LSIDL
66  *
67  *		Idle: the LWP has been created but has not yet executed,
68  *		or it has ceased executing a unit of work and is waiting
69  *		to be started again.
70  *
71  *	LSSUSPENDED
72  *
73  *		Suspended: the LWP has had its execution suspended by
74  *		another LWP in the same process using the _lwp_suspend()
75  *		system call.  User-level LWPs also enter the suspended
76  *		state when the system is shutting down.
77  *
78  *	The second set represents a "statement of intent" on behalf of the
79  *	LWP.  The LWP may in fact be executing on a processor, or may be
80  *	sleeping or idle.  It is expected to take the necessary action to
81  *	stop executing or become "running" again within a short timeframe.
82  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
83  *	Importantly, it indicates that its state is tied to a CPU.
84  *
85  *	LSZOMB:
86  *
87  *		Dead or dying: the LWP has released most of its resources
88  *		and is about to switch away into oblivion, or has already
89  *		switched away.  When it switches away, its few remaining
90  *		resources can be collected.
91  *
92  *	LSSLEEP:
93  *
94  *		Sleeping: the LWP has entered itself onto a sleep queue, and
95  *		has switched away or will switch away shortly to allow other
96  *		LWPs to run on the CPU.
97  *
98  *	LSSTOP:
99  *
100  *		Stopped: the LWP has been stopped as a result of a job
101  *		control signal, or as a result of the ptrace() interface.
102  *
103  *		Stopped LWPs may run briefly within the kernel to handle
104  *		signals that they receive, but will not return to user space
105  *		until their process' state is changed away from stopped.
106  *
107  *		Individual LWPs within a process cannot be stopped
108  *		selectively: all actions that can stop or continue LWPs
109  *		occur at the process level.
110  *
111  * State transitions
112  *
113  *	Note that the LSSTOP state may only be set when returning to
114  *	user space in userret(), or when sleeping interruptibly.  The
115  *	LSSUSPENDED state may only be set in userret().  Before setting
116  *	those states, we try to ensure that the LWPs will release all
117  *	locks that they hold, and at a minimum try to ensure that the
118  *	LWP can be set runnable again by a signal.
119  *
120  *	LWPs may transition states in the following ways:
121  *
122  *	 RUN -------> ONPROC		ONPROC -----> RUN
123  *		    				    > SLEEP
124  *		    				    > STOPPED
125  *						    > SUSPENDED
126  *						    > ZOMB
127  *						    > IDL (special cases)
128  *
129  *	 STOPPED ---> RUN		SUSPENDED --> RUN
130  *	            > SLEEP
131  *
132  *	 SLEEP -----> ONPROC		IDL --------> RUN
133  *		    > RUN			    > SUSPENDED
134  *		    > STOPPED			    > STOPPED
135  *						    > ONPROC (special cases)
136  *
137  *	Some state transitions are only possible with kernel threads (eg
138  *	ONPROC -> IDL) and happen under tightly controlled circumstances
139  *	free of unwanted side effects.
140  *
141  * Migration
142  *
143  *	Migration of threads from one CPU to another may be performed
144  *	internally by the scheduler via the sched_takecpu() and
145  *	sched_catchlwp() functions.  The universal lwp_migrate() function
146  *	should be used for any other cases.  Subsystems in the kernel must
147  *	be aware that the CPU of an LWP may change while it is not locked.
148  *
149  * Locking
150  *
151  *	The majority of fields in 'struct lwp' are covered by a single,
152  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
153  *	each field are documented in sys/lwp.h.
154  *
155  *	State transitions must be made with the LWP's general lock held,
156  *	and may cause the LWP's lock pointer to change. Manipulation of
157  *	the general lock is not performed directly, but through calls to
158  *	lwp_lock(), lwp_relock() and similar.
159  *
160  *	States and their associated locks:
161  *
162  *	LSONPROC, LSZOMB:
163  *
164  *		Always covered by spc_lwplock, which protects running LWPs.
165  *		This is a per-CPU lock and matches lwp::l_cpu.
166  *
167  *	LSIDL, LSRUN:
168  *
169  *		Always covered by spc_mutex, which protects the run queues.
170  *		This is a per-CPU lock and matches lwp::l_cpu.
171  *
172  *	LSSLEEP:
173  *
174  *		Covered by a lock associated with the sleep queue that the
175  *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
176  *
177  *	LSSTOP, LSSUSPENDED:
178  *
179  *		If the LWP was previously sleeping (l_wchan != NULL), then
180  *		l_mutex references the sleep queue lock.  If the LWP was
181  *		runnable or on the CPU when halted, or has been removed from
182  *		the sleep queue since halted, then the lock is spc_lwplock.
183  *
184  *	The lock order is as follows:
185  *
186  *		spc::spc_lwplock ->
187  *		    sleeptab::st_mutex ->
188  *			tschain_t::tc_mutex ->
189  *			    spc::spc_mutex
190  *
191  *	Each process has a scheduler state lock (proc::p_lock), and a
192  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
193  *	so on.  When an LWP is to be entered into or removed from one of the
194  *	following states, p_lock must be held and the process wide counters
195  *	adjusted:
196  *
197  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
198  *
199  *	(But not always for kernel threads.  There are some special cases
200  *	as mentioned above.  See kern_softint.c.)
201  *
202  *	Note that an LWP is considered running or likely to run soon if in
203  *	one of the following states.  This affects the value of p_nrlwps:
204  *
205  *		LSRUN, LSONPROC, LSSLEEP
206  *
207  *	p_lock does not need to be held when transitioning among these
208  *	three states, hence p_lock is rarely taken for state transitions.
209  */
210 
211 #include <sys/cdefs.h>
212 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $");
213 
214 #include "opt_ddb.h"
215 #include "opt_lockdebug.h"
216 #include "opt_sa.h"
217 #include "opt_dtrace.h"
218 
219 #define _LWP_API_PRIVATE
220 
221 #include <sys/param.h>
222 #include <sys/systm.h>
223 #include <sys/cpu.h>
224 #include <sys/pool.h>
225 #include <sys/proc.h>
226 #include <sys/sa.h>
227 #include <sys/savar.h>
228 #include <sys/syscallargs.h>
229 #include <sys/syscall_stats.h>
230 #include <sys/kauth.h>
231 #include <sys/sleepq.h>
232 #include <sys/lockdebug.h>
233 #include <sys/kmem.h>
234 #include <sys/pset.h>
235 #include <sys/intr.h>
236 #include <sys/lwpctl.h>
237 #include <sys/atomic.h>
238 #include <sys/filedesc.h>
239 #include <sys/dtrace_bsd.h>
240 #include <sys/sdt.h>
241 
242 #include <uvm/uvm_extern.h>
243 #include <uvm/uvm_object.h>
244 
245 struct lwplist		alllwp = LIST_HEAD_INITIALIZER(alllwp);
246 static pool_cache_t	lwp_cache;
247 
248 /* DTrace proc provider probes */
249 SDT_PROBE_DEFINE(proc,,,lwp_create,
250 	"struct lwp *", NULL,
251 	NULL, NULL, NULL, NULL,
252 	NULL, NULL, NULL, NULL);
253 SDT_PROBE_DEFINE(proc,,,lwp_start,
254 	"struct lwp *", NULL,
255 	NULL, NULL, NULL, NULL,
256 	NULL, NULL, NULL, NULL);
257 SDT_PROBE_DEFINE(proc,,,lwp_exit,
258 	"struct lwp *", NULL,
259 	NULL, NULL, NULL, NULL,
260 	NULL, NULL, NULL, NULL);
261 
262 struct turnstile turnstile0;
263 struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
264 #ifdef LWP0_CPU_INFO
265 	.l_cpu = LWP0_CPU_INFO,
266 #endif
267 	.l_proc = &proc0,
268 	.l_lid = 1,
269 	.l_flag = LW_SYSTEM,
270 	.l_stat = LSONPROC,
271 	.l_ts = &turnstile0,
272 	.l_syncobj = &sched_syncobj,
273 	.l_refcnt = 1,
274 	.l_priority = PRI_USER + NPRI_USER - 1,
275 	.l_inheritedprio = -1,
276 	.l_class = SCHED_OTHER,
277 	.l_psid = PS_NONE,
278 	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
279 	.l_name = __UNCONST("swapper"),
280 	.l_fd = &filedesc0,
281 };
282 
283 void
284 lwpinit(void)
285 {
286 
287 	lwpinit_specificdata();
288 	lwp_sys_init();
289 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
290 	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
291 }
292 
293 void
294 lwp0_init(void)
295 {
296 	struct lwp *l = &lwp0;
297 
298 	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
299 	KASSERT(l->l_lid == proc0.p_nlwpid);
300 
301 	LIST_INSERT_HEAD(&alllwp, l, l_list);
302 
303 	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
304 	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
305 	cv_init(&l->l_sigcv, "sigwait");
306 
307 	kauth_cred_hold(proc0.p_cred);
308 	l->l_cred = proc0.p_cred;
309 
310 	lwp_initspecific(l);
311 
312 	SYSCALL_TIME_LWP_INIT(l);
313 }
314 
315 /*
316  * Set an LWP suspended.
317  *
318  * Must be called with p_lock held, and the LWP locked.  Will unlock the
319  * LWP before return.
320  */
321 int
322 lwp_suspend(struct lwp *curl, struct lwp *t)
323 {
324 	int error;
325 
326 	KASSERT(mutex_owned(t->l_proc->p_lock));
327 	KASSERT(lwp_locked(t, NULL));
328 
329 	KASSERT(curl != t || curl->l_stat == LSONPROC);
330 
331 	/*
332 	 * If the current LWP has been told to exit, we must not suspend anyone
333 	 * else or deadlock could occur.  We won't return to userspace.
334 	 */
335 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
336 		lwp_unlock(t);
337 		return (EDEADLK);
338 	}
339 
340 	error = 0;
341 
342 	switch (t->l_stat) {
343 	case LSRUN:
344 	case LSONPROC:
345 		t->l_flag |= LW_WSUSPEND;
346 		lwp_need_userret(t);
347 		lwp_unlock(t);
348 		break;
349 
350 	case LSSLEEP:
351 		t->l_flag |= LW_WSUSPEND;
352 
353 		/*
354 		 * Kick the LWP and try to get it to the kernel boundary
355 		 * so that it will release any locks that it holds.
356 		 * setrunnable() will release the lock.
357 		 */
358 		if ((t->l_flag & LW_SINTR) != 0)
359 			setrunnable(t);
360 		else
361 			lwp_unlock(t);
362 		break;
363 
364 	case LSSUSPENDED:
365 		lwp_unlock(t);
366 		break;
367 
368 	case LSSTOP:
369 		t->l_flag |= LW_WSUSPEND;
370 		setrunnable(t);
371 		break;
372 
373 	case LSIDL:
374 	case LSZOMB:
375 		error = EINTR; /* It's what Solaris does..... */
376 		lwp_unlock(t);
377 		break;
378 	}
379 
380 	return (error);
381 }
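
/*
 * Example (editorial sketch, not part of the original source): a caller
 * honouring the lwp_suspend() protocol above.  p_lock is taken first,
 * the target is looked up and locked, and lwp_suspend() itself drops
 * the LWP lock.  The function name is hypothetical; the pattern follows
 * the _lwp_suspend() system call.
 */
static int
example_suspend_lwp(struct proc *p, lwpid_t lid)
{
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, lid)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	error = lwp_suspend(curlwp, t);		/* unlocks t */
	mutex_exit(p->p_lock);
	return error;
}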
382 
383 /*
384  * Restart a suspended LWP.
385  *
386  * Must be called with p_lock held, and the LWP locked.  Will unlock the
387  * LWP before return.
388  */
389 void
390 lwp_continue(struct lwp *l)
391 {
392 
393 	KASSERT(mutex_owned(l->l_proc->p_lock));
394 	KASSERT(lwp_locked(l, NULL));
395 
396 	/* If rebooting or not suspended, then just bail out. */
397 	if ((l->l_flag & LW_WREBOOT) != 0) {
398 		lwp_unlock(l);
399 		return;
400 	}
401 
402 	l->l_flag &= ~LW_WSUSPEND;
403 
404 	if (l->l_stat != LSSUSPENDED) {
405 		lwp_unlock(l);
406 		return;
407 	}
408 
409 	/* setrunnable() will release the lock. */
410 	setrunnable(l);
411 }
412 
413 /*
414  * Restart a stopped LWP.
415  *
416  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
417  * LWP before return.
418  */
419 void
420 lwp_unstop(struct lwp *l)
421 {
422 	struct proc *p = l->l_proc;
423 
424 	KASSERT(mutex_owned(proc_lock));
425 	KASSERT(mutex_owned(p->p_lock));
426 
427 	lwp_lock(l);
428 
429 	/* If not stopped, then just bail out. */
430 	if (l->l_stat != LSSTOP) {
431 		lwp_unlock(l);
432 		return;
433 	}
434 
435 	p->p_stat = SACTIVE;
436 	p->p_sflag &= ~PS_STOPPING;
437 
438 	if (!p->p_waited)
439 		p->p_pptr->p_nstopchild--;
440 
441 	if (l->l_wchan == NULL) {
442 		/* setrunnable() will release the lock. */
443 		setrunnable(l);
444 	} else {
445 		l->l_stat = LSSLEEP;
446 		p->p_nrlwps++;
447 		lwp_unlock(l);
448 	}
449 }
450 
451 /*
452  * Wait for an LWP within the current process to exit.  If 'lid' is
453  * non-zero, we are waiting for a specific LWP.
454  *
455  * Must be called with p->p_lock held.
456  */
457 int
458 lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
459 {
460 	struct proc *p = l->l_proc;
461 	struct lwp *l2;
462 	int nfound, error;
463 	lwpid_t curlid;
464 	bool exiting;
465 
466 	KASSERT(mutex_owned(p->p_lock));
467 
468 	p->p_nlwpwait++;
469 	l->l_waitingfor = lid;
470 	curlid = l->l_lid;
471 	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
472 
473 	for (;;) {
474 		/*
475 		 * Avoid a race between exit1() and sigexit(): if the
476 		 * process is dumping core, then we need to bail out: call
477 		 * into lwp_userret() where we will be suspended until the
478 		 * deed is done.
479 		 */
480 		if ((p->p_sflag & PS_WCORE) != 0) {
481 			mutex_exit(p->p_lock);
482 			lwp_userret(l);
483 #ifdef DIAGNOSTIC
484 			panic("lwp_wait1");
485 #endif
486 			/* NOTREACHED */
487 		}
488 
489 		/*
490 		 * First off, drain any detached LWP that is waiting to be
491 		 * reaped.
492 		 */
493 		while ((l2 = p->p_zomblwp) != NULL) {
494 			p->p_zomblwp = NULL;
493 			lwp_free(l2, false, false);	/* releases proc mutex */
496 			mutex_enter(p->p_lock);
497 		}
498 
499 		/*
500 		 * Now look for an LWP to collect.  If the whole process is
501 		 * exiting, count detached LWPs as eligible to be collected,
502 		 * but don't drain them here.
503 		 */
504 		nfound = 0;
505 		error = 0;
506 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
507 			/*
508 			 * If a specific wait and the target is waiting on
509 			 * us, then avoid deadlock.  This also traps LWPs
510 			 * that try to wait on themselves.
511 			 *
512 			 * Note that this does not handle more complicated
513 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
514 			 * can still be killed so it is not a major problem.
515 			 */
516 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
517 				error = EDEADLK;
518 				break;
519 			}
520 			if (l2 == l)
521 				continue;
522 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
523 				nfound += exiting;
524 				continue;
525 			}
526 			if (lid != 0) {
527 				if (l2->l_lid != lid)
528 					continue;
529 				/*
530 				 * Mark this LWP as the first waiter, if there
531 				 * is no other.
532 				 */
533 				if (l2->l_waiter == 0)
534 					l2->l_waiter = curlid;
535 			} else if (l2->l_waiter != 0) {
536 				/*
537 				 * It already has a waiter - so don't
538 				 * collect it.  If the waiter doesn't
539 				 * grab it we'll get another chance
540 				 * later.
541 				 */
542 				nfound++;
543 				continue;
544 			}
545 			nfound++;
546 
547 			/* No need to lock the LWP in order to see LSZOMB. */
548 			if (l2->l_stat != LSZOMB)
549 				continue;
550 
551 			/*
552 			 * We're no longer waiting.  Reset the "first waiter"
553 			 * pointer on the target, in case it was us.
554 			 */
555 			l->l_waitingfor = 0;
556 			l2->l_waiter = 0;
557 			p->p_nlwpwait--;
558 			if (departed)
559 				*departed = l2->l_lid;
560 			sched_lwp_collect(l2);
561 
562 			/* lwp_free() releases the proc lock. */
563 			lwp_free(l2, false, false);
564 			mutex_enter(p->p_lock);
565 			return 0;
566 		}
567 
568 		if (error != 0)
569 			break;
570 		if (nfound == 0) {
571 			error = ESRCH;
572 			break;
573 		}
574 
575 		/*
576 		 * The kernel is careful to ensure that it can not deadlock
577 		 * when exiting - just keep waiting.
578 		 */
579 		if (exiting) {
580 			KASSERT(p->p_nlwps > 1);
581 			cv_wait(&p->p_lwpcv, p->p_lock);
582 			continue;
583 		}
584 
585 		/*
586 		 * If all other LWPs are waiting for exits or suspends
587 		 * and the supply of zombies and potential zombies is
588 		 * exhausted, then we are about to deadlock.
589 		 *
590 		 * If the process is exiting (and this LWP is not the one
591 		 * that is coordinating the exit) then bail out now.
592 		 */
593 		if ((p->p_sflag & PS_WEXIT) != 0 ||
594 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
595 			error = EDEADLK;
596 			break;
597 		}
598 
599 		/*
600 		 * Sit around and wait for something to happen.  We'll be
601 		 * awoken if any of the conditions examined change: if an
602 		 * LWP exits, is collected, or is detached.
603 		 */
604 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
605 			break;
606 	}
607 
608 	/*
609 	 * We didn't find any LWPs to collect, we may have received a
610 	 * signal, or some other condition has caused us to bail out.
611 	 *
612 	 * If waiting on a specific LWP, clear the waiters marker: some
613 	 * other LWP may want it.  Then, kick all the remaining waiters
614 	 * so that they can re-check for zombies and for deadlock.
615 	 */
616 	if (lid != 0) {
617 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
618 			if (l2->l_lid == lid) {
619 				if (l2->l_waiter == curlid)
620 					l2->l_waiter = 0;
621 				break;
622 			}
623 		}
624 	}
625 	p->p_nlwpwait--;
626 	l->l_waitingfor = 0;
627 	cv_broadcast(&p->p_lwpcv);
628 
629 	return error;
630 }
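
/*
 * Example (editorial sketch): lwp_wait1() is entered and exited with
 * p_lock held, so a minimal caller looks like the following.  The
 * wrapper name is hypothetical; the pattern follows _lwp_wait().
 */
static int
example_wait_lwp(struct lwp *l, lwpid_t lid, lwpid_t *departed)
{
	struct proc *p = l->l_proc;
	int error;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, lid, departed, 0);
	mutex_exit(p->p_lock);
	return error;
}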
631 
632 /*
633  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
634  * The new LWP is created in state LSIDL and must be set running,
635  * suspended, or stopped by the caller.
636  */
637 int
638 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
639 	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
640 	   lwp_t **rnewlwpp, int sclass)
641 {
642 	struct lwp *l2, *isfree;
643 	turnstile_t *ts;
644 	lwpid_t lid;
645 
646 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
647 
648 	/*
649 	 * First off, reap any detached LWP waiting to be collected.
650 	 * We can re-use its LWP structure and turnstile.
651 	 */
652 	isfree = NULL;
653 	if (p2->p_zomblwp != NULL) {
654 		mutex_enter(p2->p_lock);
655 		if ((isfree = p2->p_zomblwp) != NULL) {
656 			p2->p_zomblwp = NULL;
657 			lwp_free(isfree, true, false);	/* releases proc mutex */
658 		} else
659 			mutex_exit(p2->p_lock);
660 	}
661 	if (isfree == NULL) {
662 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
663 		memset(l2, 0, sizeof(*l2));
664 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
665 		SLIST_INIT(&l2->l_pi_lenders);
666 	} else {
667 		l2 = isfree;
668 		ts = l2->l_ts;
669 		KASSERT(l2->l_inheritedprio == -1);
670 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
671 		memset(l2, 0, sizeof(*l2));
672 		l2->l_ts = ts;
673 	}
674 
675 	l2->l_stat = LSIDL;
676 	l2->l_proc = p2;
677 	l2->l_refcnt = 1;
678 	l2->l_class = sclass;
679 
680 	/*
681 	 * If vfork(), we want the LWP to run fast and on the same CPU
682 	 * as its parent, so that it can reuse the VM context and cache
683 	 * footprint on the local CPU.
684 	 */
685 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
686 	l2->l_kpribase = PRI_KERNEL;
687 	l2->l_priority = l1->l_priority;
688 	l2->l_inheritedprio = -1;
689 	l2->l_flag = 0;
690 	l2->l_pflag = LP_MPSAFE;
691 	TAILQ_INIT(&l2->l_ld_locks);
692 
693 	/*
694 	 * If not the first LWP in the process, grab a reference to the
695 	 * descriptor table.
696 	 */
697 	l2->l_fd = p2->p_fd;
698 	if (p2->p_nlwps != 0) {
699 		KASSERT(l1->l_proc == p2);
700 		fd_hold(l2);
701 	} else {
702 		KASSERT(l1->l_proc != p2);
703 	}
704 
705 	if (p2->p_flag & PK_SYSTEM) {
706 		/* Mark it as a system LWP. */
707 		l2->l_flag |= LW_SYSTEM;
708 	}
709 
710 	kpreempt_disable();
711 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
712 	l2->l_cpu = l1->l_cpu;
713 	kpreempt_enable();
714 
715 	kdtrace_thread_ctor(NULL, l2);
716 	lwp_initspecific(l2);
717 	sched_lwp_fork(l1, l2);
718 	lwp_update_creds(l2);
719 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
720 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
721 	cv_init(&l2->l_sigcv, "sigwait");
722 	l2->l_syncobj = &sched_syncobj;
723 
724 	if (rnewlwpp != NULL)
725 		*rnewlwpp = l2;
726 
727 	uvm_lwp_setuarea(l2, uaddr);
728 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
729 	    (arg != NULL) ? arg : l2);
730 
731 	if ((flags & LWP_PIDLID) != 0) {
732 		lid = proc_alloc_pid(p2);
733 		l2->l_pflag |= LP_PIDLID;
734 	} else {
735 		lid = 0;
736 	}
737 
738 	mutex_enter(p2->p_lock);
739 
740 	if ((flags & LWP_DETACHED) != 0) {
741 		l2->l_prflag = LPR_DETACHED;
742 		p2->p_ndlwps++;
743 	} else
744 		l2->l_prflag = 0;
745 
746 	l2->l_sigmask = l1->l_sigmask;
747 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
748 	sigemptyset(&l2->l_sigpend.sp_set);
749 
750 	if (lid == 0) {
751 		p2->p_nlwpid++;
752 		if (p2->p_nlwpid == 0)
753 			p2->p_nlwpid++;
754 		lid = p2->p_nlwpid;
755 	}
756 	l2->l_lid = lid;
757 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
758 	p2->p_nlwps++;
759 	p2->p_nrlwps++;
760 
761 	if ((p2->p_flag & PK_SYSTEM) == 0) {
762 		/* Inherit an affinity */
763 		if (l1->l_flag & LW_AFFINITY) {
764 			/*
765 			 * Note that we hold the state lock while inheriting
766 			 * the affinity to avoid race with sched_setaffinity().
767 			 */
768 			lwp_lock(l1);
769 			if (l1->l_flag & LW_AFFINITY) {
770 				kcpuset_use(l1->l_affinity);
771 				l2->l_affinity = l1->l_affinity;
772 				l2->l_flag |= LW_AFFINITY;
773 			}
774 			lwp_unlock(l1);
775 		}
776 		lwp_lock(l2);
777 		/* Inherit a processor-set */
778 		l2->l_psid = l1->l_psid;
779 		/* Look for a CPU to start */
780 		l2->l_cpu = sched_takecpu(l2);
781 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
782 	}
783 	mutex_exit(p2->p_lock);
784 
785 	SDT_PROBE(proc,,,lwp_create, l2, 0,0,0,0);
786 
787 	mutex_enter(proc_lock);
788 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
789 	mutex_exit(proc_lock);
790 
791 	SYSCALL_TIME_LWP_INIT(l2);
792 
793 	if (p2->p_emul->e_lwp_fork)
794 		(*p2->p_emul->e_lwp_fork)(l1, l2);
795 
796 	return (0);
797 }
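
/*
 * Example (editorial sketch): a simplified outline of how callers such
 * as the kthread and fork paths use lwp_create().  A u-area is
 * allocated, the LWP is created in LSIDL, and it is then made runnable
 * under p_lock and the LWP lock.  The function name is hypothetical,
 * error handling is abbreviated, and the uvm_uarea_*()/sched_enqueue()
 * usage is an assumption modelled on those paths.
 */
static int
example_spawn_lwp(void (*func)(void *), void *arg, lwp_t **newlp)
{
	vaddr_t uaddr;
	lwp_t *l;
	int error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0)
		return ENOMEM;
	error = lwp_create(curlwp, curproc, uaddr, 0, NULL, 0,
	    func, arg, &l, SCHED_OTHER);
	if (error != 0) {
		uvm_uarea_free(uaddr);
		return error;
	}
	mutex_enter(curproc->p_lock);
	lwp_lock(l);
	l->l_stat = LSRUN;
	sched_enqueue(l, false);	/* the LWP lock is spc_mutex here */
	lwp_unlock(l);
	mutex_exit(curproc->p_lock);
	*newlp = l;
	return 0;
}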
798 
799 /*
800  * Called by MD code when a new LWP begins execution.  Must be called
801  * with the previous LWP locked (so at splsched), or if there is no
802  * previous LWP, at splsched.
803  */
804 void
805 lwp_startup(struct lwp *prev, struct lwp *new)
806 {
807 
808 	SDT_PROBE(proc,,,lwp_start, new, 0,0,0,0);
809 
810 	KASSERT(kpreempt_disabled());
811 	if (prev != NULL) {
812 		/*
813 		 * Normalize the count of the spin-mutexes, it was
814 		 * increased in mi_switch().  Unmark the state of
815 		 * context switch - it is finished for previous LWP.
816 		 */
817 		curcpu()->ci_mtx_count++;
818 		membar_exit();
819 		prev->l_ctxswtch = 0;
820 	}
821 	KPREEMPT_DISABLE(new);
822 	spl0();
823 	pmap_activate(new);
824 	LOCKDEBUG_BARRIER(NULL, 0);
825 	KPREEMPT_ENABLE(new);
826 	if ((new->l_pflag & LP_MPSAFE) == 0) {
827 		KERNEL_LOCK(1, new);
828 	}
829 }
830 
831 /*
832  * Exit an LWP.
833  */
834 void
835 lwp_exit(struct lwp *l)
836 {
837 	struct proc *p = l->l_proc;
838 	struct lwp *l2;
839 	bool current;
840 
841 	current = (l == curlwp);
842 
843 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
844 	KASSERT(p == curproc);
845 
846 	SDT_PROBE(proc,,,lwp_exit, l, 0,0,0,0);
847 
848 	/*
849 	 * Verify that we hold no locks other than the kernel lock.
850 	 */
851 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
852 
853 	/*
854 	 * If we are the last live LWP in a process, we need to exit the
855 	 * entire process.  We do so with an exit status of zero, because
856 	 * it's a "controlled" exit, and because that's what Solaris does.
857 	 *
858 	 * We are not quite a zombie yet, but for accounting purposes we
859 	 * must increment the count of zombies here.
860 	 *
861 	 * Note: the last LWP's specificdata will be deleted here.
862 	 */
863 	mutex_enter(p->p_lock);
864 	if (p->p_nlwps - p->p_nzlwps == 1) {
865 		KASSERT(current == true);
866 		/* XXXSMP kernel_lock not held */
867 		exit1(l, 0);
868 		/* NOTREACHED */
869 	}
870 	p->p_nzlwps++;
871 	mutex_exit(p->p_lock);
872 
873 	if (p->p_emul->e_lwp_exit)
874 		(*p->p_emul->e_lwp_exit)(l);
875 
876 	/* Drop filedesc reference. */
877 	fd_free();
878 
879 	/* Delete the specificdata while it's still safe to sleep. */
880 	lwp_finispecific(l);
881 
882 	/*
883 	 * Release our cached credentials.
884 	 */
885 	kauth_cred_free(l->l_cred);
886 	callout_destroy(&l->l_timeout_ch);
887 
888 	/*
889 	 * Remove the LWP from the global list.
890 	 * Free its LID from the PID namespace if needed.
891 	 */
892 	mutex_enter(proc_lock);
893 	LIST_REMOVE(l, l_list);
894 	if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
895 		proc_free_pid(l->l_lid);
896 	}
897 	mutex_exit(proc_lock);
898 
899 	/*
900 	 * Get rid of all references to the LWP that others (e.g. procfs)
901 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
902 	 * mark it waiting for collection in the proc structure.  Note that
903  * before we can do that, we need to free any other dead, detached
904 	 * LWP waiting to meet its maker.
905 	 */
906 	mutex_enter(p->p_lock);
907 	lwp_drainrefs(l);
908 
909 	if ((l->l_prflag & LPR_DETACHED) != 0) {
910 		while ((l2 = p->p_zomblwp) != NULL) {
911 			p->p_zomblwp = NULL;
912 			lwp_free(l2, false, false);	/* releases proc mutex */
913 			mutex_enter(p->p_lock);
914 			l->l_refcnt++;
915 			lwp_drainrefs(l);
916 		}
917 		p->p_zomblwp = l;
918 	}
919 
920 	/*
921 	 * If we find a pending signal for the process and we have been
922 	 * asked to check for signals, then we lose: arrange to have
923 	 * all other LWPs in the process check for signals.
924 	 */
925 	if ((l->l_flag & LW_PENDSIG) != 0 &&
926 	    firstsig(&p->p_sigpend.sp_set) != 0) {
927 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
928 			lwp_lock(l2);
929 			l2->l_flag |= LW_PENDSIG;
930 			lwp_unlock(l2);
931 		}
932 	}
933 
934 	lwp_lock(l);
935 	l->l_stat = LSZOMB;
936 	if (l->l_name != NULL)
937 		strcpy(l->l_name, "(zombie)");
938 	if (l->l_flag & LW_AFFINITY) {
939 		l->l_flag &= ~LW_AFFINITY;
940 	} else {
941 		KASSERT(l->l_affinity == NULL);
942 	}
943 	lwp_unlock(l);
944 	p->p_nrlwps--;
945 	cv_broadcast(&p->p_lwpcv);
946 	if (l->l_lwpctl != NULL)
947 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
948 	mutex_exit(p->p_lock);
949 
950 	/* Safe without lock since LWP is in zombie state */
951 	if (l->l_affinity) {
952 		kcpuset_unuse(l->l_affinity, NULL);
953 		l->l_affinity = NULL;
954 	}
955 
956 	/*
957 	 * We can no longer block.  At this point, lwp_free() may already
958 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
959 	 *
960 	 * Free MD LWP resources.
961 	 */
962 	cpu_lwp_free(l, 0);
963 
964 	if (current) {
965 		pmap_deactivate(l);
966 
967 		/*
968 		 * Release the kernel lock, and switch away into
969 		 * oblivion.
970 		 */
971 #ifdef notyet
972 		/* XXXSMP hold in lwp_userret() */
973 		KERNEL_UNLOCK_LAST(l);
974 #else
975 		KERNEL_UNLOCK_ALL(l, NULL);
976 #endif
977 		lwp_exit_switchaway(l);
978 	}
979 }
980 
981 /*
982  * Free a dead LWP's remaining resources.
983  *
984  * XXXLWP limits.
985  */
986 void
987 lwp_free(struct lwp *l, bool recycle, bool last)
988 {
989 	struct proc *p = l->l_proc;
990 	struct rusage *ru;
991 	ksiginfoq_t kq;
992 
993 	KASSERT(l != curlwp);
994 
995 	/*
996 	 * If this was not the last LWP in the process, then adjust
997 	 * counters and unlock.
998 	 */
999 	if (!last) {
1000 		/*
1001 		 * Add the LWP's run time to the process' base value.
1002 		 * This needs to co-incide with coming off p_lwps.
1003 		 */
1004 		bintime_add(&p->p_rtime, &l->l_rtime);
1005 		p->p_pctcpu += l->l_pctcpu;
1006 		ru = &p->p_stats->p_ru;
1007 		ruadd(ru, &l->l_ru);
1008 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
1009 		ru->ru_nivcsw += l->l_nivcsw;
1010 		LIST_REMOVE(l, l_sibling);
1011 		p->p_nlwps--;
1012 		p->p_nzlwps--;
1013 		if ((l->l_prflag & LPR_DETACHED) != 0)
1014 			p->p_ndlwps--;
1015 
1016 		/*
1017 		 * Have any LWPs sleeping in lwp_wait() recheck for
1018 		 * deadlock.
1019 		 */
1020 		cv_broadcast(&p->p_lwpcv);
1021 		mutex_exit(p->p_lock);
1022 	}
1023 
1024 #ifdef MULTIPROCESSOR
1025 	/*
1026 	 * In the unlikely event that the LWP is still on the CPU,
1027 	 * then spin until it has switched away.  We need to release
1028 	 * all locks to avoid deadlock against interrupt handlers on
1029 	 * the target CPU.
1030 	 */
1031 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
1032 		int count;
1033 		(void)count; /* XXXgcc */
1034 		KERNEL_UNLOCK_ALL(curlwp, &count);
1035 		while ((l->l_pflag & LP_RUNNING) != 0 ||
1036 		    l->l_cpu->ci_curlwp == l)
1037 			SPINLOCK_BACKOFF_HOOK;
1038 		KERNEL_LOCK(count, curlwp);
1039 	}
1040 #endif
1041 
1042 	/*
1043 	 * Destroy the LWP's remaining signal information.
1044 	 */
1045 	ksiginfo_queue_init(&kq);
1046 	sigclear(&l->l_sigpend, NULL, &kq);
1047 	ksiginfo_queue_drain(&kq);
1048 	cv_destroy(&l->l_sigcv);
1049 
1050 	/*
1051 	 * Free the LWP's turnstile and the LWP structure itself unless the
1052 	 * caller wants to recycle them.  Also, free the scheduler specific
1053 	 * data.
1054 	 *
1055 	 * We can't return turnstile0 to the pool (it didn't come from it),
1056 	 * so if it comes up just drop it quietly and move on.
1057 	 *
1058 	 * We don't recycle the VM resources at this time.
1059 	 */
1060 	if (l->l_lwpctl != NULL)
1061 		lwp_ctl_free(l);
1062 
1063 	if (!recycle && l->l_ts != &turnstile0)
1064 		pool_cache_put(turnstile_cache, l->l_ts);
1065 	if (l->l_name != NULL)
1066 		kmem_free(l->l_name, MAXCOMLEN);
1067 
1068 	cpu_lwp_free2(l);
1069 	uvm_lwp_exit(l);
1070 
1071 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1072 	KASSERT(l->l_inheritedprio == -1);
1073 	kdtrace_thread_dtor(NULL, l);
1074 	if (!recycle)
1075 		pool_cache_put(lwp_cache, l);
1076 }
1077 
1078 /*
1079  * Migrate the LWP to another CPU.  Unlocks the LWP.
1080  */
1081 void
1082 lwp_migrate(lwp_t *l, struct cpu_info *tci)
1083 {
1084 	struct schedstate_percpu *tspc;
1085 	int lstat = l->l_stat;
1086 
1087 	KASSERT(lwp_locked(l, NULL));
1088 	KASSERT(tci != NULL);
1089 
1090 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
1091 	if ((l->l_pflag & LP_RUNNING) != 0) {
1092 		lstat = LSONPROC;
1093 	}
1094 
1095 	/*
1096 	 * The destination CPU could be changed while previous migration
1097 	 * was not finished.
1098 	 */
1099 	if (l->l_target_cpu != NULL) {
1100 		l->l_target_cpu = tci;
1101 		lwp_unlock(l);
1102 		return;
1103 	}
1104 
1105 	/* Nothing to do if trying to migrate to the same CPU */
1106 	if (l->l_cpu == tci) {
1107 		lwp_unlock(l);
1108 		return;
1109 	}
1110 
1111 	KASSERT(l->l_target_cpu == NULL);
1112 	tspc = &tci->ci_schedstate;
1113 	switch (lstat) {
1114 	case LSRUN:
1115 		l->l_target_cpu = tci;
1116 		break;
1117 	case LSIDL:
1118 		l->l_cpu = tci;
1119 		lwp_unlock_to(l, tspc->spc_mutex);
1120 		return;
1121 	case LSSLEEP:
1122 		l->l_cpu = tci;
1123 		break;
1124 	case LSSTOP:
1125 	case LSSUSPENDED:
1126 		l->l_cpu = tci;
1127 		if (l->l_wchan == NULL) {
1128 			lwp_unlock_to(l, tspc->spc_lwplock);
1129 			return;
1130 		}
1131 		break;
1132 	case LSONPROC:
1133 		l->l_target_cpu = tci;
1134 		spc_lock(l->l_cpu);
1135 		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
1136 		spc_unlock(l->l_cpu);
1137 		break;
1138 	}
1139 	lwp_unlock(l);
1140 }
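
/*
 * Example (editorial sketch): callers of lwp_migrate() lock the LWP
 * and rely on lwp_migrate() to release the lock.
 */
static void
example_migrate_lwp(lwp_t *l, struct cpu_info *tci)
{
	lwp_lock(l);
	lwp_migrate(l, tci);	/* unlocks l */
}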
1141 
1142 /*
1143  * Find an LWP in the process.  Either argument may be zero, in which
1144  * case the calling process and/or the first LWP in the list is used.
1145  * On success, the proc is returned locked.
1146  */
1147 struct lwp *
1148 lwp_find2(pid_t pid, lwpid_t lid)
1149 {
1150 	proc_t *p;
1151 	lwp_t *l;
1152 
1153 	/* Find the process. */
1154 	if (pid != 0) {
1155 		mutex_enter(proc_lock);
1156 		p = proc_find(pid);
1157 		if (p == NULL) {
1158 			mutex_exit(proc_lock);
1159 			return NULL;
1160 		}
1161 		mutex_enter(p->p_lock);
1162 		mutex_exit(proc_lock);
1163 	} else {
1164 		p = curlwp->l_proc;
1165 		mutex_enter(p->p_lock);
1166 	}
1167 	/* Find the thread. */
1168 	if (lid != 0) {
1169 		l = lwp_find(p, lid);
1170 	} else {
1171 		l = LIST_FIRST(&p->p_lwps);
1172 	}
1173 	if (l == NULL) {
1174 		mutex_exit(p->p_lock);
1175 	}
1176 	return l;
1177 }
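
/*
 * Example (editorial sketch): lwp_find2() returns with the process
 * locked on success, so the caller must release p_lock when done.
 */
static void
example_inspect_lwp(pid_t pid, lwpid_t lid)
{
	struct lwp *l;

	if ((l = lwp_find2(pid, lid)) != NULL) {
		/* ... examine 'l' while p_lock is held ... */
		mutex_exit(l->l_proc->p_lock);
	}
}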
1178 
1179 /*
1180  * Look up a live LWP within the specified process, and return it locked.
1181  *
1182  * Must be called with p->p_lock held.
1183  */
1184 struct lwp *
1185 lwp_find(struct proc *p, lwpid_t id)
1186 {
1187 	struct lwp *l;
1188 
1189 	KASSERT(mutex_owned(p->p_lock));
1190 
1191 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1192 		if (l->l_lid == id)
1193 			break;
1194 	}
1195 
1196 	/*
1197 	 * No need to lock - all of these conditions will
1198 	 * be visible with the process level mutex held.
1199 	 */
1200 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1201 		l = NULL;
1202 
1203 	return l;
1204 }
1205 
1206 /*
1207  * Update an LWP's cached credentials to mirror the process' master copy.
1208  *
1209  * This happens early in the syscall path, on user trap, and on LWP
1210  * creation.  A long-running LWP can also voluntarily choose to update
1211  * its credentials by calling this routine.  This may be called from
1212  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1213  */
1214 void
1215 lwp_update_creds(struct lwp *l)
1216 {
1217 	kauth_cred_t oc;
1218 	struct proc *p;
1219 
1220 	p = l->l_proc;
1221 	oc = l->l_cred;
1222 
1223 	mutex_enter(p->p_lock);
1224 	kauth_cred_hold(p->p_cred);
1225 	l->l_cred = p->p_cred;
1226 	l->l_prflag &= ~LPR_CRMOD;
1227 	mutex_exit(p->p_lock);
1228 	if (oc != NULL)
1229 		kauth_cred_free(oc);
1230 }
1231 
1232 /*
1233  * Verify that an LWP is locked, and optionally verify that the lock matches
1234  * one we specify.
1235  */
1236 int
1237 lwp_locked(struct lwp *l, kmutex_t *mtx)
1238 {
1239 	kmutex_t *cur = l->l_mutex;
1240 
1241 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1242 }
1243 
1244 /*
1245  * Lock an LWP.
1246  */
1247 kmutex_t *
1248 lwp_lock_retry(struct lwp *l, kmutex_t *old)
1249 {
1250 
1251 	/*
1252 	 * XXXgcc ignoring kmutex_t * volatile on i386
1253 	 *
1254 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
1255 	 */
1256 #if 1
1257 	while (l->l_mutex != old) {
1258 #else
1259 	for (;;) {
1260 #endif
1261 		mutex_spin_exit(old);
1262 		old = l->l_mutex;
1263 		mutex_spin_enter(old);
1264 
1265 		/*
1266 		 * mutex_enter() will have posted a read barrier.  Re-test
1267 		 * l->l_mutex.  If it has changed, we need to try again.
1268 		 */
1269 #if 1
1270 	}
1271 #else
1272 	} while (__predict_false(l->l_mutex != old));
1273 #endif
1274 
1275 	return old;
1276 }
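
/*
 * For reference (editorial note): the lwp_lock() operation in
 * sys/lwp.h is built on lwp_lock_retry(); it is approximately:
 *
 *	kmutex_t *old = l->l_mutex;
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))
 *		lwp_lock_retry(l, old);
 *
 * i.e. the common, uncontended case avoids the retry loop entirely.
 */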
1277 
1278 /*
1279  * Lend a new mutex to an LWP.  The old mutex must be held.
1280  */
1281 void
1282 lwp_setlock(struct lwp *l, kmutex_t *new)
1283 {
1284 
1285 	KASSERT(mutex_owned(l->l_mutex));
1286 
1287 	membar_exit();
1288 	l->l_mutex = new;
1289 }
1290 
1291 /*
1292  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
1293  * must be held.
1294  */
1295 void
1296 lwp_unlock_to(struct lwp *l, kmutex_t *new)
1297 {
1298 	kmutex_t *old;
1299 
1300 	KASSERT(mutex_owned(l->l_mutex));
1301 
1302 	old = l->l_mutex;
1303 	membar_exit();
1304 	l->l_mutex = new;
1305 	mutex_spin_exit(old);
1306 }
1307 
1308 /*
1309  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
1310  * locked.
1311  */
1312 void
1313 lwp_relock(struct lwp *l, kmutex_t *new)
1314 {
1315 	kmutex_t *old;
1316 
1317 	KASSERT(mutex_owned(l->l_mutex));
1318 
1319 	old = l->l_mutex;
1320 	if (old != new) {
1321 		mutex_spin_enter(new);
1322 		l->l_mutex = new;
1323 		mutex_spin_exit(old);
1324 	}
1325 }
1326 
1327 int
1328 lwp_trylock(struct lwp *l)
1329 {
1330 	kmutex_t *old;
1331 
1332 	for (;;) {
1333 		if (!mutex_tryenter(old = l->l_mutex))
1334 			return 0;
1335 		if (__predict_true(l->l_mutex == old))
1336 			return 1;
1337 		mutex_spin_exit(old);
1338 	}
1339 }
1340 
1341 void
1342 lwp_unsleep(lwp_t *l, bool cleanup)
1343 {
1344 
1345 	KASSERT(mutex_owned(l->l_mutex));
1346 	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
1347 }
1348 
1349 
1350 /*
1351  * Handle exceptions for mi_userret().  Called if any flag in the
1352  * LW_USERRET mask is set.
1353  */
1354 void
1355 lwp_userret(struct lwp *l)
1356 {
1357 	struct proc *p;
1358 	int sig;
1359 
1360 	KASSERT(l == curlwp);
1361 	KASSERT(l->l_stat == LSONPROC);
1362 	p = l->l_proc;
1363 
1364 #ifndef __HAVE_FAST_SOFTINTS
1365 	/* Run pending soft interrupts. */
1366 	if (l->l_cpu->ci_data.cpu_softints != 0)
1367 		softint_overlay();
1368 #endif
1369 
1370 #ifdef KERN_SA
1371 	/* Generate UNBLOCKED upcall if needed */
1372 	if (l->l_flag & LW_SA_BLOCKING) {
1373 		sa_unblock_userret(l);
1374 		/* NOTREACHED */
1375 	}
1376 #endif
1377 
1378 	/*
1379 	 * It should be safe to do this read unlocked on a multiprocessor
1380 	 * system.
1381 	 *
1382 	 * LW_SA_UPCALL will be handled after the while() loop, so don't
1383 	 * consider it now.
1384 	 */
1385 	while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
1386 		/*
1387 		 * Process pending signals first, unless the process
1388 		 * is dumping core or exiting, where we will instead
1389 		 * enter the LW_WSUSPEND case below.
1390 		 */
1391 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1392 		    LW_PENDSIG) {
1393 			mutex_enter(p->p_lock);
1394 			while ((sig = issignal(l)) != 0)
1395 				postsig(sig);
1396 			mutex_exit(p->p_lock);
1397 		}
1398 
1399 		/*
1400 		 * Core-dump or suspend pending.
1401 		 *
1402 		 * In case of core dump, suspend ourselves, so that the
1403 		 * kernel stack and therefore the userland registers saved
1404 		 * in the trapframe are around for coredump() to write them
1405 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
1406 		 * will write the core file out once all other LWPs are
1407 		 * suspended.
1408 		 */
1409 		if ((l->l_flag & LW_WSUSPEND) != 0) {
1410 			mutex_enter(p->p_lock);
1411 			p->p_nrlwps--;
1412 			cv_broadcast(&p->p_lwpcv);
1413 			lwp_lock(l);
1414 			l->l_stat = LSSUSPENDED;
1415 			lwp_unlock(l);
1416 			mutex_exit(p->p_lock);
1417 			lwp_lock(l);
1418 			mi_switch(l);
1419 		}
1420 
1421 		/* Process is exiting. */
1422 		if ((l->l_flag & LW_WEXIT) != 0) {
1423 			lwp_exit(l);
1424 			KASSERT(0);
1425 			/* NOTREACHED */
1426 		}
1427 	}
1428 
1429 #ifdef KERN_SA
1430 	/*
1431 	 * Timer events are handled specially.  We only try once to deliver
1432 	 * pending timer upcalls; if it fails, we can try again on the next
1433 	 * loop around.  If we need to re-enter lwp_userret(), MD code will
1434 	 * bounce us back here through the trap path after we return.
1435 	 */
1436 	if (p->p_timerpend)
1437 		timerupcall(l);
1438 	if (l->l_flag & LW_SA_UPCALL)
1439 		sa_upcall_userret(l);
1440 #endif /* KERN_SA */
1441 }
1442 
1443 /*
1444  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1445  */
1446 void
1447 lwp_need_userret(struct lwp *l)
1448 {
1449 	KASSERT(lwp_locked(l, NULL));
1450 
1451 	/*
1452 	 * Since the tests in lwp_userret() are done unlocked, make sure
1453 	 * that the condition will be seen before forcing the LWP to enter
1454 	 * kernel mode.
1455 	 */
1456 	membar_producer();
1457 	cpu_signotify(l);
1458 }
1459 
1460 /*
1461  * Add one reference to an LWP.  This will prevent the LWP from
1462  * exiting, thus keeping the LWP structure and PCB around to inspect.
1463  */
1464 void
1465 lwp_addref(struct lwp *l)
1466 {
1467 
1468 	KASSERT(mutex_owned(l->l_proc->p_lock));
1469 	KASSERT(l->l_stat != LSZOMB);
1470 	KASSERT(l->l_refcnt != 0);
1471 
1472 	l->l_refcnt++;
1473 }
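
/*
 * Example (editorial sketch): the reference protocol used by
 * subsystems such as procfs.  The reference is taken under p_lock,
 * the LWP can then be inspected without it, and lwp_delref() drops
 * the reference (taking p_lock internally).
 */
static void
example_hold_lwp(struct proc *p, struct lwp *l)
{
	mutex_enter(p->p_lock);
	lwp_addref(l);
	mutex_exit(p->p_lock);

	/* ... inspect the LWP and its PCB ... */

	lwp_delref(l);
}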
1474 
1475 /*
1476  * Remove one reference to an LWP.  If this is the last reference,
1477  * then we must finalize the LWP's death.
1478  */
1479 void
1480 lwp_delref(struct lwp *l)
1481 {
1482 	struct proc *p = l->l_proc;
1483 
1484 	mutex_enter(p->p_lock);
1485 	lwp_delref2(l);
1486 	mutex_exit(p->p_lock);
1487 }
1488 
1489 /*
1490  * Remove one reference to an LWP.  If this is the last reference,
1491  * then we must finalize the LWP's death.  The proc mutex is held
1492  * on entry.
1493  */
1494 void
1495 lwp_delref2(struct lwp *l)
1496 {
1497 	struct proc *p = l->l_proc;
1498 
1499 	KASSERT(mutex_owned(p->p_lock));
1500 	KASSERT(l->l_stat != LSZOMB);
1501 	KASSERT(l->l_refcnt > 0);
1502 	if (--l->l_refcnt == 0)
1503 		cv_broadcast(&p->p_lwpcv);
1504 }
1505 
1506 /*
1507  * Drain all references to the current LWP.
1508  */
1509 void
1510 lwp_drainrefs(struct lwp *l)
1511 {
1512 	struct proc *p = l->l_proc;
1513 
1514 	KASSERT(mutex_owned(p->p_lock));
1515 	KASSERT(l->l_refcnt != 0);
1516 
1517 	l->l_refcnt--;
1518 	while (l->l_refcnt != 0)
1519 		cv_wait(&p->p_lwpcv, p->p_lock);
1520 }
1521 
1522 /*
1523  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
1524  * be held.
1525  */
1526 bool
1527 lwp_alive(lwp_t *l)
1528 {
1529 
1530 	KASSERT(mutex_owned(l->l_proc->p_lock));
1531 
1532 	switch (l->l_stat) {
1533 	case LSSLEEP:
1534 	case LSRUN:
1535 	case LSONPROC:
1536 	case LSSTOP:
1537 	case LSSUSPENDED:
1538 		return true;
1539 	default:
1540 		return false;
1541 	}
1542 }
1543 
1544 /*
1545  * Return first live LWP in the process.
1546  */
1547 lwp_t *
1548 lwp_find_first(proc_t *p)
1549 {
1550 	lwp_t *l;
1551 
1552 	KASSERT(mutex_owned(p->p_lock));
1553 
1554 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1555 		if (lwp_alive(l)) {
1556 			return l;
1557 		}
1558 	}
1559 
1560 	return NULL;
1561 }
1562 
1563 /*
1564  * Allocate a new lwpctl structure for a user LWP.
1565  */
1566 int
1567 lwp_ctl_alloc(vaddr_t *uaddr)
1568 {
1569 	lcproc_t *lp;
1570 	u_int bit, i, offset;
1571 	struct uvm_object *uao;
1572 	int error;
1573 	lcpage_t *lcp;
1574 	proc_t *p;
1575 	lwp_t *l;
1576 
1577 	l = curlwp;
1578 	p = l->l_proc;
1579 
1580 	if (l->l_lcpage != NULL) {
1581 		lcp = l->l_lcpage;
1582 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1583 		return 0;
1584 	}
1585 
1586 	/* First time around, allocate header structure for the process. */
1587 	if ((lp = p->p_lwpctl) == NULL) {
1588 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1589 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1590 		lp->lp_uao = NULL;
1591 		TAILQ_INIT(&lp->lp_pages);
1592 		mutex_enter(p->p_lock);
1593 		if (p->p_lwpctl == NULL) {
1594 			p->p_lwpctl = lp;
1595 			mutex_exit(p->p_lock);
1596 		} else {
1597 			mutex_exit(p->p_lock);
1598 			mutex_destroy(&lp->lp_lock);
1599 			kmem_free(lp, sizeof(*lp));
1600 			lp = p->p_lwpctl;
1601 		}
1602 	}
1603 
1604 	/*
1605 	 * Set up an anonymous memory region to hold the shared pages.
1606 	 * Map them into the process' address space.  The user vmspace
1607 	 * gets the first reference on the UAO.
1608 	 */
1609 	mutex_enter(&lp->lp_lock);
1610 	if (lp->lp_uao == NULL) {
1611 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1612 		lp->lp_cur = 0;
1613 		lp->lp_max = LWPCTL_UAREA_SZ;
1614 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1615 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
1616 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1617 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1618 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1619 		if (error != 0) {
1620 			uao_detach(lp->lp_uao);
1621 			lp->lp_uao = NULL;
1622 			mutex_exit(&lp->lp_lock);
1623 			return error;
1624 		}
1625 	}
1626 
1627 	/* Get a free block and allocate for this LWP. */
1628 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1629 		if (lcp->lcp_nfree != 0)
1630 			break;
1631 	}
1632 	if (lcp == NULL) {
1633 		/* Nothing available - try to set up a free page. */
1634 		if (lp->lp_cur == lp->lp_max) {
1635 			mutex_exit(&lp->lp_lock);
1636 			return ENOMEM;
1637 		}
1638 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1639 		if (lcp == NULL) {
1640 			mutex_exit(&lp->lp_lock);
1641 			return ENOMEM;
1642 		}
1643 		/*
1644 		 * Wire the next page down in kernel space.  Since this
1645 		 * is a new mapping, we must add a reference.
1646 		 */
1647 		uao = lp->lp_uao;
1648 		(*uao->pgops->pgo_reference)(uao);
1649 		lcp->lcp_kaddr = vm_map_min(kernel_map);
1650 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1651 		    uao, lp->lp_cur, PAGE_SIZE,
1652 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1653 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1654 		if (error != 0) {
1655 			mutex_exit(&lp->lp_lock);
1656 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1657 			(*uao->pgops->pgo_detach)(uao);
1658 			return error;
1659 		}
1660 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1661 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1662 		if (error != 0) {
1663 			mutex_exit(&lp->lp_lock);
1664 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
1665 			    lcp->lcp_kaddr + PAGE_SIZE);
1666 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1667 			return error;
1668 		}
1669 		/* Prepare the page descriptor and link into the list. */
1670 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1671 		lp->lp_cur += PAGE_SIZE;
1672 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
1673 		lcp->lcp_rotor = 0;
1674 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1675 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1676 	}
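	/*
	 * Each lcp_bitmap[] word covers 32 lwpctl slots (hence the
	 * 5-bit shift arithmetic below); a set bit marks a free slot.
	 */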
1677 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1678 		if (++i >= LWPCTL_BITMAP_ENTRIES)
1679 			i = 0;
1680 	}
1681 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
1682 	lcp->lcp_bitmap[i] ^= (1 << bit);
1683 	lcp->lcp_rotor = i;
1684 	lcp->lcp_nfree--;
1685 	l->l_lcpage = lcp;
1686 	offset = (i << 5) + bit;
1687 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1688 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1689 	mutex_exit(&lp->lp_lock);
1690 
1691 	KPREEMPT_DISABLE(l);
1692 	l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1693 	KPREEMPT_ENABLE(l);
1694 
1695 	return 0;
1696 }
1697 
1698 /*
1699  * Free an lwpctl structure back to the per-process list.
1700  */
1701 void
1702 lwp_ctl_free(lwp_t *l)
1703 {
1704 	lcproc_t *lp;
1705 	lcpage_t *lcp;
1706 	u_int map, offset;
1707 
1708 	lp = l->l_proc->p_lwpctl;
1709 	KASSERT(lp != NULL);
1710 
1711 	lcp = l->l_lcpage;
1712 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1713 	KASSERT(offset < LWPCTL_PER_PAGE);
1714 
1715 	mutex_enter(&lp->lp_lock);
1716 	lcp->lcp_nfree++;
1717 	map = offset >> 5;
1718 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
1719 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1720 		lcp->lcp_rotor = map;
1721 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1722 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1723 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1724 	}
1725 	mutex_exit(&lp->lp_lock);
1726 }
1727 
1728 /*
1729  * Process is exiting; tear down lwpctl state.  This can only be safely
1730  * called by the last LWP in the process.
1731  */
1732 void
1733 lwp_ctl_exit(void)
1734 {
1735 	lcpage_t *lcp, *next;
1736 	lcproc_t *lp;
1737 	proc_t *p;
1738 	lwp_t *l;
1739 
1740 	l = curlwp;
1741 	l->l_lwpctl = NULL;
1742 	l->l_lcpage = NULL;
1743 	p = l->l_proc;
1744 	lp = p->p_lwpctl;
1745 
1746 	KASSERT(lp != NULL);
1747 	KASSERT(p->p_nlwps == 1);
1748 
1749 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
1750 		next = TAILQ_NEXT(lcp, lcp_chain);
1751 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
1752 		    lcp->lcp_kaddr + PAGE_SIZE);
1753 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1754 	}
1755 
1756 	if (lp->lp_uao != NULL) {
1757 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
1758 		    lp->lp_uva + LWPCTL_UAREA_SZ);
1759 	}
1760 
1761 	mutex_destroy(&lp->lp_lock);
1762 	kmem_free(lp, sizeof(*lp));
1763 	p->p_lwpctl = NULL;
1764 }
1765 
1766 /*
1767  * Return the current LWP's "preemption counter".  Used to detect
1768  * preemption across operations that can tolerate preemption without
1769  * crashing, but which may generate incorrect results if preempted.
1770  */
1771 uint64_t
1772 lwp_pctr(void)
1773 {
1774 
1775 	return curlwp->l_ncsw;
1776 }
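
/*
 * Example (editorial sketch): using lwp_pctr() to detect preemption
 * across a lockless operation and retry when it occurs.
 */
static void
example_pctr_retry(void)
{
	uint64_t pctr;

	do {
		pctr = lwp_pctr();
		/* ... work that tolerates, but must detect, preemption ... */
	} while (pctr != lwp_pctr());
}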
1777 
1778 /*
1779  * Set an LWP's private data pointer.
1780  */
1781 int
1782 lwp_setprivate(struct lwp *l, void *ptr)
1783 {
1784 	int error = 0;
1785 
1786 	l->l_private = ptr;
1787 #ifdef __HAVE_CPU_LWP_SETPRIVATE
1788 	error = cpu_lwp_setprivate(l, ptr);
1789 #endif
1790 	return error;
1791 }
1792 
1793 #if defined(DDB)
1794 void
1795 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1796 {
1797 	lwp_t *l;
1798 
1799 	LIST_FOREACH(l, &alllwp, l_list) {
1800 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
1801 
1802 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
1803 			continue;
1804 		}
1805 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
1806 		    (void *)addr, (void *)stack,
1807 		    (size_t)(addr - stack), l);
1808 	}
1809 }
1810 #endif /* defined(DDB) */
1811