1 /*	$NetBSD: kern_lwp.c,v 1.128 2009/03/03 21:55:06 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams, and Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	Lightweight processes (LWPs) are the basic unit or thread of
36  *	execution within the kernel.  The core state of an LWP is described
37  *	by "struct lwp", also known as lwp_t.
38  *
39  *	Each LWP is contained within a process (described by "struct proc").
40  *	Every process contains at least one LWP, but may contain more.  The
41  *	process describes attributes shared among all of its LWPs such as a
42  *	private address space, global execution state (stopped, active,
43  *	zombie, ...), signal disposition and so on.  On a multiprocessor
44  *	machine, multiple LWPs may be executing concurrently in the kernel.
45  *
46  * Execution states
47  *
48  *	At any given time, an LWP has overall state that is described by
49  *	lwp::l_stat.  The states are broken into two sets below.  The first
50  *	set is guaranteed to represent the absolute, current state of the
51  *	LWP:
52  *
53  *	LSONPROC
54  *
55  *		On processor: the LWP is executing on a CPU, either in the
56  *		kernel or in user space.
57  *
58  *	LSRUN
59  *
60  *		Runnable: the LWP is parked on a run queue, and may soon be
61  *		chosen to run by an idle processor, or by a processor that
62  *		has been asked to preempt a currently running but lower
63  *		priority LWP.  If the LWP is not swapped in (LW_INMEM == 0)
64  *		then the LWP is not on a run queue, but may be soon.
65  *
66  *	LSIDL
67  *
68  *		Idle: the LWP has been created but has not yet executed,
69  *		or it has ceased executing a unit of work and is waiting
70  *		to be started again.
71  *
72  *	LSSUSPENDED:
73  *
74  *		Suspended: the LWP has had its execution suspended by
75  *		another LWP in the same process using the _lwp_suspend()
76  *		system call.  User-level LWPs also enter the suspended
77  *		state when the system is shutting down.
78  *
79  *	The second set represents a "statement of intent" on behalf of the
80  *	LWP.  The LWP may in fact be executing on a processor, or may be
81  *	sleeping or idle.  It is expected to take the necessary action to
82  *	stop executing or become "running" again within a short timeframe.
83  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
84  *	Importantly, it indicates that its state is tied to a CPU.
85  *
86  *	LSZOMB:
87  *
88  *		Dead or dying: the LWP has released most of its resources
89  *		and either a) is about to switch away into oblivion, or
90  *		b) has already switched away.  When it switches away, its
91  *		few remaining resources can be collected.
92  *
93  *	LSSLEEP:
94  *
95  *		Sleeping: the LWP has entered itself onto a sleep queue, and
96  *		has switched away or will switch away shortly to allow other
97  *		LWPs to run on the CPU.
98  *
99  *	LSSTOP:
100  *
101  *		Stopped: the LWP has been stopped as a result of a job
102  *		control signal, or as a result of the ptrace() interface.
103  *
104  *		Stopped LWPs may run briefly within the kernel to handle
105  *		signals that they receive, but will not return to user space
106  *		until their process' state is changed away from stopped.
107  *
108  *		Individual LWPs within a process cannot be stopped
109  *		selectively: all actions that can stop or continue LWPs
110  *		occur at the process level.
111  *
112  * State transitions
113  *
114  *	Note that the LSSTOP state may only be set when returning to
115  *	user space in userret(), or when sleeping interruptibly.  The
116  *	LSSUSPENDED state may only be set in userret().  Before setting
117  *	those states, we try to ensure that the LWPs will release all
118  *	locks that they hold, and at a minimum try to ensure that the
119  *	LWP can be set runnable again by a signal.
120  *
121  *	LWPs may transition states in the following ways:
122  *
123  *	 RUN -------> ONPROC		ONPROC -----> RUN
124  *		    > STOPPED			    > SLEEP
125  *		    > SUSPENDED			    > STOPPED
126  *						    > SUSPENDED
127  *						    > ZOMB
128  *
129  *	 STOPPED ---> RUN		SUSPENDED --> RUN
130  *	            > SLEEP			    > SLEEP
131  *
132  *	 SLEEP -----> ONPROC		IDL --------> RUN
133  *		    > RUN			    > SUSPENDED
134  *		    > STOPPED			    > STOPPED
135  *		    > SUSPENDED
136  *
137  *	Other state transitions are possible with kernel threads (e.g.
138  *	ONPROC -> IDL), but only happen under tightly controlled
139  *	circumstances where the side effects are understood.
140  *
141  * Migration
142  *
143  *	Migration of threads from one CPU to another may be performed
144  *	internally by the scheduler via the sched_takecpu() or
145  *	sched_catchlwp() functions.  The universal lwp_migrate() function
146  *	should be used for all other cases.  Kernel subsystems must be
147  *	aware that an LWP's CPU may change while the LWP is not locked.
148  *
149  * Locking
150  *
151  *	The majority of fields in 'struct lwp' are covered by a single,
152  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
153  *	each field are documented in sys/lwp.h.
154  *
155  *	State transitions must be made with the LWP's general lock held,
156  *	and may cause the LWP's lock pointer to change. Manipulation of
157  *	the general lock is not performed directly, but through calls to
158  *	lwp_lock(), lwp_relock() and similar.
159  *
160  *	States and their associated locks:
161  *
162  *	LSONPROC, LSZOMB:
163  *
164  *		Always covered by spc_lwplock, which protects running LWPs.
165  *		This is a per-CPU lock.
166  *
167  *	LSIDL, LSRUN:
168  *
169  *		Always covered by spc_mutex, which protects the run queues.
170  *		This is a per-CPU lock.
171  *
172  *	LSSLEEP:
173  *
174  *		Covered by a lock associated with the sleep queue that the
175  *		LWP resides on.
176  *
177  *	LSSTOP, LSSUSPENDED:
178  *
179  *		If the LWP was previously sleeping (l_wchan != NULL), then
180  *		l_mutex references the sleep queue lock.  If the LWP was
181  *		runnable or on the CPU when halted, or has been removed from
182  *		the sleep queue since halted, then the lock is spc_lwplock.
183  *
184  *	The lock order is as follows:
185  *
186  *		spc::spc_lwplock ->
187  *		    sleeptab::st_mutex ->
188  *			tschain_t::tc_mutex ->
189  *			    spc::spc_mutex
190  *
191  *	Each process has a scheduler state lock (proc::p_lock), and a
192  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
193  *	so on.  When an LWP is to be entered into or removed from one of the
194  *	following states, p_lock must be held and the process wide counters
195  *	adjusted:
196  *
197  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
198  *
199  *	Note that an LWP is considered running or likely to run soon if in
200  *	one of the following states.  This affects the value of p_nrlwps:
201  *
202  *		LSRUN, LSONPROC, LSSLEEP
203  *
204  *	p_lock does not need to be held when transitioning among these
205  *	three states.
206  */
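
/*
 * Editor's illustration (a hedged sketch, not taken from any particular
 * caller): under the rules above, code that inspects another LWP takes
 * p_lock to stabilize the process' LWP list and counters, then the
 * LWP's general lock to stabilize l_stat:
 *
 *	mutex_enter(p->p_lock);
 *	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 *		lwp_lock(l);
 *		... l->l_stat and the scheduling state are now stable ...
 *		lwp_unlock(l);
 *	}
 *	mutex_exit(p->p_lock);
 */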
207 
208 #include <sys/cdefs.h>
209 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.128 2009/03/03 21:55:06 rmind Exp $");
210 
211 #include "opt_ddb.h"
212 #include "opt_lockdebug.h"
213 #include "opt_sa.h"
214 
215 #define _LWP_API_PRIVATE
216 
217 #include <sys/param.h>
218 #include <sys/systm.h>
219 #include <sys/cpu.h>
220 #include <sys/pool.h>
221 #include <sys/proc.h>
222 #include <sys/sa.h>
223 #include <sys/savar.h>
224 #include <sys/syscallargs.h>
225 #include <sys/syscall_stats.h>
226 #include <sys/kauth.h>
227 #include <sys/sleepq.h>
228 #include <sys/user.h>
229 #include <sys/lockdebug.h>
230 #include <sys/kmem.h>
231 #include <sys/pset.h>
232 #include <sys/intr.h>
233 #include <sys/lwpctl.h>
234 #include <sys/atomic.h>
235 
236 #include <uvm/uvm_extern.h>
237 #include <uvm/uvm_object.h>
238 
239 struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);
240 
241 POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
242     &pool_allocator_nointr, IPL_NONE);
243 
244 static pool_cache_t lwp_cache;
245 static specificdata_domain_t lwp_specificdata_domain;
246 
247 void
248 lwpinit(void)
249 {
250 
251 	lwp_specificdata_domain = specificdata_domain_create();
252 	KASSERT(lwp_specificdata_domain != NULL);
253 	lwp_sys_init();
254 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
255 	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
256 }
257 
258 /*
259  * Set an LWP suspended.
260  *
261  * Must be called with p_lock held, and the LWP locked.  Will unlock the
262  * LWP before return.
263  */
264 int
265 lwp_suspend(struct lwp *curl, struct lwp *t)
266 {
267 	int error;
268 
269 	KASSERT(mutex_owned(t->l_proc->p_lock));
270 	KASSERT(lwp_locked(t, NULL));
271 
272 	KASSERT(curl != t || curl->l_stat == LSONPROC);
273 
274 	/*
275 	 * If the current LWP has been told to exit, we must not suspend anyone
276 	 * else or deadlock could occur.  We won't return to userspace.
277 	 */
278 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
279 		lwp_unlock(t);
280 		return (EDEADLK);
281 	}
282 
283 	error = 0;
284 
285 	switch (t->l_stat) {
286 	case LSRUN:
287 	case LSONPROC:
288 		t->l_flag |= LW_WSUSPEND;
289 		lwp_need_userret(t);
290 		lwp_unlock(t);
291 		break;
292 
293 	case LSSLEEP:
294 		t->l_flag |= LW_WSUSPEND;
295 
296 		/*
297 		 * Kick the LWP and try to get it to the kernel boundary
298 		 * so that it will release any locks that it holds.
299 		 * setrunnable() will release the lock.
300 		 */
301 		if ((t->l_flag & LW_SINTR) != 0)
302 			setrunnable(t);
303 		else
304 			lwp_unlock(t);
305 		break;
306 
307 	case LSSUSPENDED:
308 		lwp_unlock(t);
309 		break;
310 
311 	case LSSTOP:
312 		t->l_flag |= LW_WSUSPEND;
313 		setrunnable(t);
314 		break;
315 
316 	case LSIDL:
317 	case LSZOMB:
318 		error = EINTR; /* It's what Solaris does..... */
319 		lwp_unlock(t);
320 		break;
321 	}
322 
323 	return (error);
324 }
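
/*
 * Editor's example (a hedged sketch, loosely modeled on the
 * _lwp_suspend() system call path; error handling trimmed).  Note that
 * lwp_suspend() expects both p_lock and the target's LWP lock to be
 * held, and consumes the LWP lock:
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);
 *	mutex_exit(p->p_lock);
 */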
325 
326 /*
327  * Restart a suspended LWP.
328  *
329  * Must be called with p_lock held, and the LWP locked.  Will unlock the
330  * LWP before return.
331  */
332 void
333 lwp_continue(struct lwp *l)
334 {
335 
336 	KASSERT(mutex_owned(l->l_proc->p_lock));
337 	KASSERT(lwp_locked(l, NULL));
338 
339 	/* If rebooting or not suspended, then just bail out. */
340 	if ((l->l_flag & LW_WREBOOT) != 0) {
341 		lwp_unlock(l);
342 		return;
343 	}
344 
345 	l->l_flag &= ~LW_WSUSPEND;
346 
347 	if (l->l_stat != LSSUSPENDED) {
348 		lwp_unlock(l);
349 		return;
350 	}
351 
352 	/* setrunnable() will release the lock. */
353 	setrunnable(l);
354 }
355 
356 /*
357  * Wait for an LWP within the current process to exit.  If 'lid' is
358  * non-zero, we are waiting for a specific LWP.
359  *
360  * Must be called with p->p_lock held.
361  */
362 int
363 lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
364 {
365 	struct proc *p = l->l_proc;
366 	struct lwp *l2;
367 	int nfound, error;
368 	lwpid_t curlid;
369 	bool exiting;
370 
371 	KASSERT(mutex_owned(p->p_lock));
372 
373 	p->p_nlwpwait++;
374 	l->l_waitingfor = lid;
375 	curlid = l->l_lid;
376 	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
377 
378 	for (;;) {
379 		/*
380 		 * Avoid a race between exit1() and sigexit(): if the
381 		 * process is dumping core, then we need to bail out: call
382 		 * into lwp_userret() where we will be suspended until the
383 		 * deed is done.
384 		 */
385 		if ((p->p_sflag & PS_WCORE) != 0) {
386 			mutex_exit(p->p_lock);
387 			lwp_userret(l);
388 #ifdef DIAGNOSTIC
389 			panic("lwp_wait1");
390 #endif
391 			/* NOTREACHED */
392 		}
393 
394 		/*
395 		 * First off, drain any detached LWP that is waiting to be
396 		 * reaped.
397 		 */
398 		while ((l2 = p->p_zomblwp) != NULL) {
399 			p->p_zomblwp = NULL;
400 			lwp_free(l2, false, false);/* releases proc mutex */
401 			mutex_enter(p->p_lock);
402 		}
403 
404 		/*
405 		 * Now look for an LWP to collect.  If the whole process is
406 		 * exiting, count detached LWPs as eligible to be collected,
407 		 * but don't drain them here.
408 		 */
409 		nfound = 0;
410 		error = 0;
411 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
412 			/*
413 			 * If a specific wait and the target is waiting on
414 			 * us, then avoid deadlock.  This also traps LWPs
415 			 * that try to wait on themselves.
416 			 *
417 			 * Note that this does not handle more complicated
418 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
419 			 * can still be killed so it is not a major problem.
420 			 */
421 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
422 				error = EDEADLK;
423 				break;
424 			}
425 			if (l2 == l)
426 				continue;
427 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
428 				nfound += exiting;
429 				continue;
430 			}
431 			if (lid != 0) {
432 				if (l2->l_lid != lid)
433 					continue;
434 				/*
435 				 * Mark this LWP as the first waiter, if there
436 				 * is no other.
437 				 */
438 				if (l2->l_waiter == 0)
439 					l2->l_waiter = curlid;
440 			} else if (l2->l_waiter != 0) {
441 				/*
442 				 * It already has a waiter - so don't
443 				 * collect it.  If the waiter doesn't
444 				 * grab it we'll get another chance
445 				 * later.
446 				 */
447 				nfound++;
448 				continue;
449 			}
450 			nfound++;
451 
452 			/* No need to lock the LWP in order to see LSZOMB. */
453 			if (l2->l_stat != LSZOMB)
454 				continue;
455 
456 			/*
457 			 * We're no longer waiting.  Reset the "first waiter"
458 			 * pointer on the target, in case it was us.
459 			 */
460 			l->l_waitingfor = 0;
461 			l2->l_waiter = 0;
462 			p->p_nlwpwait--;
463 			if (departed)
464 				*departed = l2->l_lid;
465 			sched_lwp_collect(l2);
466 
467 			/* lwp_free() releases the proc lock. */
468 			lwp_free(l2, false, false);
469 			mutex_enter(p->p_lock);
470 			return 0;
471 		}
472 
473 		if (error != 0)
474 			break;
475 		if (nfound == 0) {
476 			error = ESRCH;
477 			break;
478 		}
479 
480 		/*
481 		 * The kernel is careful to ensure that it can not deadlock
482 		 * when exiting - just keep waiting.
483 		 */
484 		if (exiting) {
485 			KASSERT(p->p_nlwps > 1);
486 			cv_wait(&p->p_lwpcv, p->p_lock);
487 			continue;
488 		}
489 
490 		/*
491 		 * If all other LWPs are waiting for exits or suspends
492 		 * and the supply of zombies and potential zombies is
493 		 * exhausted, then we are about to deadlock.
494 		 *
495 		 * If the process is exiting (and this LWP is not the one
496 		 * that is coordinating the exit) then bail out now.
497 		 */
498 		if ((p->p_sflag & PS_WEXIT) != 0 ||
499 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
500 			error = EDEADLK;
501 			break;
502 		}
503 
504 		/*
505 		 * Sit around and wait for something to happen.  We'll be
506 		 * awoken if any of the conditions examined change: if an
507 		 * LWP exits, is collected, or is detached.
508 		 */
509 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
510 			break;
511 	}
512 
513 	/*
514 	 * We didn't find any LWPs to collect, we may have received a
515 	 * signal, or some other condition has caused us to bail out.
516 	 *
517 	 * If waiting on a specific LWP, clear the waiters marker: some
518 	 * other LWP may want it.  Then, kick all the remaining waiters
519 	 * so that they can re-check for zombies and for deadlock.
520 	 */
521 	if (lid != 0) {
522 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
523 			if (l2->l_lid == lid) {
524 				if (l2->l_waiter == curlid)
525 					l2->l_waiter = 0;
526 				break;
527 			}
528 		}
529 	}
530 	p->p_nlwpwait--;
531 	l->l_waitingfor = 0;
532 	cv_broadcast(&p->p_lwpcv);
533 
534 	return error;
535 }
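
/*
 * Editor's example (a sketch of a typical caller, akin to the
 * _lwp_wait() system call; details vary by release).  lwp_wait1() is
 * entered and exited with p_lock held:
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait1(curlwp, lid, &departed, 0);
 *	mutex_exit(p->p_lock);
 *	if (error == 0)
 *		... departed holds the reaped LWP's ID ...
 */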
536 
537 /*
538  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
539  * The new LWP is created in state LSIDL and must be set running,
540  * suspended, or stopped by the caller.
541  */
542 int
543 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
544 	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
545 	   lwp_t **rnewlwpp, int sclass)
546 {
547 	struct lwp *l2, *isfree;
548 	turnstile_t *ts;
549 
550 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
551 
552 	/*
553 	 * First off, reap any detached LWP waiting to be collected.
554 	 * We can re-use its LWP structure and turnstile.
555 	 */
556 	isfree = NULL;
557 	if (p2->p_zomblwp != NULL) {
558 		mutex_enter(p2->p_lock);
559 		if ((isfree = p2->p_zomblwp) != NULL) {
560 			p2->p_zomblwp = NULL;
561 			lwp_free(isfree, true, false);/* releases proc mutex */
562 		} else
563 			mutex_exit(p2->p_lock);
564 	}
565 	if (isfree == NULL) {
566 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
567 		memset(l2, 0, sizeof(*l2));
568 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
569 		SLIST_INIT(&l2->l_pi_lenders);
570 	} else {
571 		l2 = isfree;
572 		ts = l2->l_ts;
573 		KASSERT(l2->l_inheritedprio == -1);
574 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
575 		memset(l2, 0, sizeof(*l2));
576 		l2->l_ts = ts;
577 	}
578 
579 	l2->l_stat = LSIDL;
580 	l2->l_proc = p2;
581 	l2->l_refcnt = 1;
582 	l2->l_class = sclass;
583 
584 	/*
585 	 * If vfork(), we want the LWP to run fast and on the same CPU
586 	 * as its parent, so that it can reuse the VM context and cache
587 	 * footprint on the local CPU.
588 	 */
589 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
590 	l2->l_kpribase = PRI_KERNEL;
591 	l2->l_priority = l1->l_priority;
592 	l2->l_inheritedprio = -1;
593 	l2->l_flag = inmem ? LW_INMEM : 0;
594 	l2->l_pflag = LP_MPSAFE;
595 	l2->l_fd = p2->p_fd;
596 	TAILQ_INIT(&l2->l_ld_locks);
597 
598 	if (p2->p_flag & PK_SYSTEM) {
599 		/* Mark it as a system LWP and not a candidate for swapping */
600 		l2->l_flag |= LW_SYSTEM;
601 	}
602 
603 	kpreempt_disable();
604 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
605 	l2->l_cpu = l1->l_cpu;
606 	kpreempt_enable();
607 
608 	lwp_initspecific(l2);
609 	sched_lwp_fork(l1, l2);
610 	lwp_update_creds(l2);
611 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
612 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
613 	mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
614 	cv_init(&l2->l_sigcv, "sigwait");
615 	l2->l_syncobj = &sched_syncobj;
616 
617 	if (rnewlwpp != NULL)
618 		*rnewlwpp = l2;
619 
620 	l2->l_addr = UAREA_TO_USER(uaddr);
621 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
622 	    (arg != NULL) ? arg : l2);
623 
624 	mutex_enter(p2->p_lock);
625 
626 	if ((flags & LWP_DETACHED) != 0) {
627 		l2->l_prflag = LPR_DETACHED;
628 		p2->p_ndlwps++;
629 	} else
630 		l2->l_prflag = 0;
631 
632 	l2->l_sigmask = l1->l_sigmask;
633 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
634 	sigemptyset(&l2->l_sigpend.sp_set);
635 
636 	p2->p_nlwpid++;
637 	if (p2->p_nlwpid == 0)
638 		p2->p_nlwpid++;
639 	l2->l_lid = p2->p_nlwpid;
640 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
641 	p2->p_nlwps++;
642 
643 	if ((p2->p_flag & PK_SYSTEM) == 0) {
644 		/* Inherit an affinity */
645 		if (l1->l_flag & LW_AFFINITY) {
646 			/*
647 			 * Note that we hold the state lock while inheriting
648 			 * the affinity to avoid race with sched_setaffinity().
649 			 */
650 			lwp_lock(l1);
651 			if (l1->l_flag & LW_AFFINITY) {
652 				kcpuset_use(l1->l_affinity);
653 				l2->l_affinity = l1->l_affinity;
654 				l2->l_flag |= LW_AFFINITY;
655 			}
656 			lwp_unlock(l1);
657 		}
658 		lwp_lock(l2);
659 		/* Inherit a processor-set */
660 		l2->l_psid = l1->l_psid;
661 		/* Look for a CPU to start */
662 		l2->l_cpu = sched_takecpu(l2);
663 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
664 	}
665 	mutex_exit(p2->p_lock);
666 
667 	mutex_enter(proc_lock);
668 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
669 	mutex_exit(proc_lock);
670 
671 	SYSCALL_TIME_LWP_INIT(l2);
672 
673 	if (p2->p_emul->e_lwp_fork)
674 		(*p2->p_emul->e_lwp_fork)(l1, l2);
675 
676 	return (0);
677 }
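
/*
 * Editor's example: a heavily hedged sketch of creating and starting an
 * LWP, loosely modeled on kthread_create()-style callers of this era.
 * The helper uvm_uarea_alloc() and the exact start-up steps are
 * assumptions and may differ by release:
 *
 *	inmem = uvm_uarea_alloc(&uaddr);
 *	if (uaddr == 0)
 *		return ENOMEM;
 *	error = lwp_create(curlwp, p, uaddr, inmem, LWP_DETACHED,
 *	    NULL, 0, start_func, arg, &l2, SCHED_OTHER);
 *	if (error != 0)
 *		return error;
 *	mutex_enter(p->p_lock);
 *	lwp_lock(l2);
 *	l2->l_stat = LSRUN;
 *	sched_enqueue(l2, false);
 *	lwp_unlock(l2);
 *	p->p_nrlwps++;
 *	mutex_exit(p->p_lock);
 */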
678 
679 /*
680  * Called by MD code when a new LWP begins execution.  Must be called
681  * with the previous LWP locked (so at splsched), or if there is no
682  * previous LWP, at splsched.
683  */
684 void
685 lwp_startup(struct lwp *prev, struct lwp *new)
686 {
687 
688 	KASSERT(kpreempt_disabled());
689 	if (prev != NULL) {
690 		/*
691 		 * Normalize the count of the spin-mutexes; it was
692 		 * increased in mi_switch().  Clear the context switch
693 		 * indicator - the switch is finished for the previous LWP.
694 		 */
695 		curcpu()->ci_mtx_count++;
696 		membar_exit();
697 		prev->l_ctxswtch = 0;
698 	}
699 	KPREEMPT_DISABLE(new);
700 	spl0();
701 	pmap_activate(new);
702 	LOCKDEBUG_BARRIER(NULL, 0);
703 	KPREEMPT_ENABLE(new);
704 	if ((new->l_pflag & LP_MPSAFE) == 0) {
705 		KERNEL_LOCK(1, new);
706 	}
707 }
708 
709 /*
710  * Exit an LWP.
711  */
712 void
713 lwp_exit(struct lwp *l)
714 {
715 	struct proc *p = l->l_proc;
716 	struct lwp *l2;
717 	bool current;
718 
719 	current = (l == curlwp);
720 
721 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
722 
723 	/*
724 	 * Verify that we hold no locks other than the kernel lock.
725 	 */
726 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
727 
728 	/*
729 	 * If we are the last live LWP in a process, we need to exit the
730 	 * entire process.  We do so with an exit status of zero, because
731 	 * it's a "controlled" exit, and because that's what Solaris does.
732 	 *
733 	 * We are not quite a zombie yet, but for accounting purposes we
734 	 * must increment the count of zombies here.
735 	 *
736 	 * Note: the last LWP's specificdata will be deleted here.
737 	 */
738 	mutex_enter(p->p_lock);
739 	if (p->p_nlwps - p->p_nzlwps == 1) {
740 		KASSERT(current == true);
741 		/* XXXSMP kernel_lock not held */
742 		exit1(l, 0);
743 		/* NOTREACHED */
744 	}
745 	p->p_nzlwps++;
746 	mutex_exit(p->p_lock);
747 
748 	if (p->p_emul->e_lwp_exit)
749 		(*p->p_emul->e_lwp_exit)(l);
750 
751 	/* Delete the specificdata while it's still safe to sleep. */
752 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
753 
754 	/*
755 	 * Release our cached credentials.
756 	 */
757 	kauth_cred_free(l->l_cred);
758 	callout_destroy(&l->l_timeout_ch);
759 
760 	/*
761 	 * While we can still block, mark the LWP as unswappable to
762  * prevent conflicts with the swapper.
763 	 */
764 	if (current)
765 		uvm_lwp_hold(l);
766 
767 	/*
768 	 * Remove the LWP from the global list.
769 	 */
770 	mutex_enter(proc_lock);
771 	LIST_REMOVE(l, l_list);
772 	mutex_exit(proc_lock);
773 
774 	/*
775 	 * Get rid of all references to the LWP that others (e.g. procfs)
776 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
777 	 * mark it waiting for collection in the proc structure.  Note that
778  * before we can do that, we need to free any other dead, detached
779 	 * LWP waiting to meet its maker.
780 	 */
781 	mutex_enter(p->p_lock);
782 	lwp_drainrefs(l);
783 
784 	if ((l->l_prflag & LPR_DETACHED) != 0) {
785 		while ((l2 = p->p_zomblwp) != NULL) {
786 			p->p_zomblwp = NULL;
787 			lwp_free(l2, false, false);/* releases proc mutex */
788 			mutex_enter(p->p_lock);
789 			l->l_refcnt++;
790 			lwp_drainrefs(l);
791 		}
792 		p->p_zomblwp = l;
793 	}
794 
795 	/*
796 	 * If we find a pending signal for the process and we have been
797  * asked to check for signals, then we lose: arrange to have
798 	 * all other LWPs in the process check for signals.
799 	 */
800 	if ((l->l_flag & LW_PENDSIG) != 0 &&
801 	    firstsig(&p->p_sigpend.sp_set) != 0) {
802 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
803 			lwp_lock(l2);
804 			l2->l_flag |= LW_PENDSIG;
805 			lwp_unlock(l2);
806 		}
807 	}
808 
809 	lwp_lock(l);
810 	l->l_stat = LSZOMB;
811 	if (l->l_name != NULL)
812 		strcpy(l->l_name, "(zombie)");
813 	if (l->l_flag & LW_AFFINITY) {
814 		l->l_flag &= ~LW_AFFINITY;
815 	} else {
816 		KASSERT(l->l_affinity == NULL);
817 	}
818 	lwp_unlock(l);
819 	p->p_nrlwps--;
820 	cv_broadcast(&p->p_lwpcv);
821 	if (l->l_lwpctl != NULL)
822 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
823 	mutex_exit(p->p_lock);
824 
825 	/* Safe without lock since LWP is in zombie state */
826 	if (l->l_affinity) {
827 		kcpuset_unuse(l->l_affinity, NULL);
828 		l->l_affinity = NULL;
829 	}
830 
831 	/*
832 	 * We can no longer block.  At this point, lwp_free() may already
833 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
834 	 *
835 	 * Free MD LWP resources.
836 	 */
837 #ifndef __NO_CPU_LWP_FREE
838 	cpu_lwp_free(l, 0);
839 #endif
840 
841 	if (current) {
842 		pmap_deactivate(l);
843 
844 		/*
845 		 * Release the kernel lock, and switch away into
846 		 * oblivion.
847 		 */
848 #ifdef notyet
849 		/* XXXSMP hold in lwp_userret() */
850 		KERNEL_UNLOCK_LAST(l);
851 #else
852 		KERNEL_UNLOCK_ALL(l, NULL);
853 #endif
854 		lwp_exit_switchaway(l);
855 	}
856 }
857 
858 /*
859  * Free a dead LWP's remaining resources.
860  *
861  * XXXLWP limits.
862  */
863 void
864 lwp_free(struct lwp *l, bool recycle, bool last)
865 {
866 	struct proc *p = l->l_proc;
867 	struct rusage *ru;
868 	ksiginfoq_t kq;
869 
870 	KASSERT(l != curlwp);
871 
872 	/*
873 	 * If this was not the last LWP in the process, then adjust
874 	 * counters and unlock.
875 	 */
876 	if (!last) {
877 		/*
878 		 * Add the LWP's run time to the process' base value.
879 		 * This needs to coincide with coming off p_lwps.
880 		 */
881 		bintime_add(&p->p_rtime, &l->l_rtime);
882 		p->p_pctcpu += l->l_pctcpu;
883 		ru = &p->p_stats->p_ru;
884 		ruadd(ru, &l->l_ru);
885 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
886 		ru->ru_nivcsw += l->l_nivcsw;
887 		LIST_REMOVE(l, l_sibling);
888 		p->p_nlwps--;
889 		p->p_nzlwps--;
890 		if ((l->l_prflag & LPR_DETACHED) != 0)
891 			p->p_ndlwps--;
892 
893 		/*
894 		 * Have any LWPs sleeping in lwp_wait() recheck for
895 		 * deadlock.
896 		 */
897 		cv_broadcast(&p->p_lwpcv);
898 		mutex_exit(p->p_lock);
899 	}
900 
901 #ifdef MULTIPROCESSOR
902 	/*
903 	 * In the unlikely event that the LWP is still on the CPU,
904 	 * then spin until it has switched away.  We need to release
905 	 * all locks to avoid deadlock against interrupt handlers on
906 	 * the target CPU.
907 	 */
908 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
909 		int count;
910 		(void)count; /* XXXgcc */
911 		KERNEL_UNLOCK_ALL(curlwp, &count);
912 		while ((l->l_pflag & LP_RUNNING) != 0 ||
913 		    l->l_cpu->ci_curlwp == l)
914 			SPINLOCK_BACKOFF_HOOK;
915 		KERNEL_LOCK(count, curlwp);
916 	}
917 #endif
918 
919 	/*
920 	 * Destroy the LWP's remaining signal information.
921 	 */
922 	ksiginfo_queue_init(&kq);
923 	sigclear(&l->l_sigpend, NULL, &kq);
924 	ksiginfo_queue_drain(&kq);
925 	cv_destroy(&l->l_sigcv);
926 	mutex_destroy(&l->l_swaplock);
927 
928 	/*
929 	 * Free the LWP's turnstile and the LWP structure itself unless the
930 	 * caller wants to recycle them.  Also, free the scheduler specific
931 	 * data.
932 	 *
933 	 * We can't return turnstile0 to the pool (it didn't come from it),
934 	 * so if it comes up just drop it quietly and move on.
935 	 *
936 	 * We don't recycle the VM resources at this time.
937 	 */
938 	if (l->l_lwpctl != NULL)
939 		lwp_ctl_free(l);
940 
941 	if (!recycle && l->l_ts != &turnstile0)
942 		pool_cache_put(turnstile_cache, l->l_ts);
943 	if (l->l_name != NULL)
944 		kmem_free(l->l_name, MAXCOMLEN);
945 #ifndef __NO_CPU_LWP_FREE
946 	cpu_lwp_free2(l);
947 #endif
948 	KASSERT((l->l_flag & LW_INMEM) != 0);
949 	uvm_lwp_exit(l);
950 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
951 	KASSERT(l->l_inheritedprio == -1);
952 	if (!recycle)
953 		pool_cache_put(lwp_cache, l);
954 }
955 
956 /*
957  * Migrate the LWP to another CPU.  Unlocks the LWP.
958  */
959 void
960 lwp_migrate(lwp_t *l, struct cpu_info *tci)
961 {
962 	struct schedstate_percpu *tspc;
963 	int lstat = l->l_stat;
964 
965 	KASSERT(lwp_locked(l, NULL));
966 	KASSERT(tci != NULL);
967 
968 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
969 	if ((l->l_pflag & LP_RUNNING) != 0) {
970 		lstat = LSONPROC;
971 	}
972 
973 	/*
974 	 * The destination CPU may change again while a previous
975 	 * migration has not yet finished.
976 	 */
977 	if (l->l_target_cpu != NULL) {
978 		l->l_target_cpu = tci;
979 		lwp_unlock(l);
980 		return;
981 	}
982 
983 	/* Nothing to do if trying to migrate to the same CPU */
984 	if (l->l_cpu == tci) {
985 		lwp_unlock(l);
986 		return;
987 	}
988 
989 	KASSERT(l->l_target_cpu == NULL);
990 	tspc = &tci->ci_schedstate;
991 	switch (lstat) {
992 	case LSRUN:
993 		if (l->l_flag & LW_INMEM) {
994 			l->l_target_cpu = tci;
995 			lwp_unlock(l);
996 			return;
997 		}
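		/* FALLTHROUGH */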
998 	case LSIDL:
999 		l->l_cpu = tci;
1000 		lwp_unlock_to(l, tspc->spc_mutex);
1001 		return;
1002 	case LSSLEEP:
1003 		l->l_cpu = tci;
1004 		break;
1005 	case LSSTOP:
1006 	case LSSUSPENDED:
1007 		l->l_cpu = tci;
1008 		if (l->l_wchan == NULL) {
1009 			lwp_unlock_to(l, tspc->spc_lwplock);
1010 			return;
1011 		}
1012 		break;
1013 	case LSONPROC:
1014 		l->l_target_cpu = tci;
1015 		spc_lock(l->l_cpu);
1016 		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
1017 		spc_unlock(l->l_cpu);
1018 		break;
1019 	}
1020 	lwp_unlock(l);
1021 }
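
/*
 * Editor's example (a brief sketch): the caller locks the LWP and
 * lwp_migrate() consumes the lock:
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, target_ci);
 *	... no lwp_unlock(): lwp_migrate() has unlocked the LWP ...
 */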
1022 
1023 /*
1024  * Find an LWP in a process.  Either argument may be zero, in which
1025  * case the calling process and/or the first LWP in the list are used.
1026  * On success, the proc is returned locked.
1027  */
1028 struct lwp *
1029 lwp_find2(pid_t pid, lwpid_t lid)
1030 {
1031 	proc_t *p;
1032 	lwp_t *l;
1033 
1034 	/* Find the process */
1035 	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
1036 	if (p == NULL)
1037 		return NULL;
1038 	mutex_enter(p->p_lock);
1039 	if (pid != 0) {
1040 		/* Case of p_find */
1041 		mutex_exit(proc_lock);
1042 	}
1043 
1044 	/* Find the thread */
1045 	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
1046 	if (l == NULL) {
1047 		mutex_exit(p->p_lock);
1048 	}
1049 
1050 	return l;
1051 }
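
/*
 * Editor's example (a sketch): on success the process is returned
 * locked, so the caller must drop p_lock when done:
 *
 *	if ((l = lwp_find2(pid, lid)) == NULL)
 *		return ESRCH;
 *	... use l while holding l->l_proc->p_lock ...
 *	mutex_exit(l->l_proc->p_lock);
 */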
1052 
1053 /*
1054  * Look up a live LWP within the specified process.
1055  *
1056  * Must be called with p->p_lock held.
1057  */
1058 struct lwp *
1059 lwp_find(struct proc *p, int id)
1060 {
1061 	struct lwp *l;
1062 
1063 	KASSERT(mutex_owned(p->p_lock));
1064 
1065 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1066 		if (l->l_lid == id)
1067 			break;
1068 	}
1069 
1070 	/*
1071 	 * No need to lock - all of these conditions will
1072 	 * be visible with the process level mutex held.
1073 	 */
1074 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1075 		l = NULL;
1076 
1077 	return l;
1078 }
1079 
1080 /*
1081  * Update an LWP's cached credentials to mirror the process' master copy.
1082  *
1083  * This happens early in the syscall path, on user trap, and on LWP
1084  * creation.  A long-running LWP can also voluntarily choose to update
1085  * it's credentials by calling this routine.  This may be called from
1086  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1087  */
1088 void
1089 lwp_update_creds(struct lwp *l)
1090 {
1091 	kauth_cred_t oc;
1092 	struct proc *p;
1093 
1094 	p = l->l_proc;
1095 	oc = l->l_cred;
1096 
1097 	mutex_enter(p->p_lock);
1098 	kauth_cred_hold(p->p_cred);
1099 	l->l_cred = p->p_cred;
1100 	l->l_prflag &= ~LPR_CRMOD;
1101 	mutex_exit(p->p_lock);
1102 	if (oc != NULL)
1103 		kauth_cred_free(oc);
1104 }
1105 
1106 /*
1107  * Verify that an LWP is locked, and optionally verify that the lock matches
1108  * one we specify.
1109  */
1110 int
1111 lwp_locked(struct lwp *l, kmutex_t *mtx)
1112 {
1113 	kmutex_t *cur = l->l_mutex;
1114 
1115 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1116 }
1117 
1118 /*
1119  * Lock an LWP.
1120  */
1121 kmutex_t *
1122 lwp_lock_retry(struct lwp *l, kmutex_t *old)
1123 {
1124 
1125 	/*
1126 	 * XXXgcc ignoring kmutex_t * volatile on i386
1127 	 *
1128 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
1129 	 */
1130 #if 1
1131 	while (l->l_mutex != old) {
1132 #else
1133 	for (;;) {
1134 #endif
1135 		mutex_spin_exit(old);
1136 		old = l->l_mutex;
1137 		mutex_spin_enter(old);
1138 
1139 		/*
1140 		 * mutex_enter() will have posted a read barrier.  Re-test
1141 		 * l->l_mutex.  If it has changed, we need to try again.
1142 		 */
1143 #if 1
1144 	}
1145 #else
1146 	} while (__predict_false(l->l_mutex != old));
1147 #endif
1148 
1149 	return old;
1150 }
1151 
1152 /*
1153  * Lend a new mutex to an LWP.  The old mutex must be held.
1154  */
1155 void
1156 lwp_setlock(struct lwp *l, kmutex_t *new)
1157 {
1158 
1159 	KASSERT(mutex_owned(l->l_mutex));
1160 
1161 	membar_exit();
1162 	l->l_mutex = new;
1163 }
1164 
1165 /*
1166  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
1167  * must be held.
1168  */
1169 void
1170 lwp_unlock_to(struct lwp *l, kmutex_t *new)
1171 {
1172 	kmutex_t *old;
1173 
1174 	KASSERT(mutex_owned(l->l_mutex));
1175 
1176 	old = l->l_mutex;
1177 	membar_exit();
1178 	l->l_mutex = new;
1179 	mutex_spin_exit(old);
1180 }
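
/*
 * Editor's illustration (a sketch; compare the LSIDL case in
 * lwp_create() above): handing an LWP over to a new run queue swaps the
 * lock pointer without ever leaving the LWP unlocked:
 *
 *	lwp_lock(l);
 *	l->l_cpu = tci;
 *	lwp_unlock_to(l, tci->ci_schedstate.spc_mutex);
 */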
1181 
1182 /*
1183  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
1184  * locked.
1185  */
1186 void
1187 lwp_relock(struct lwp *l, kmutex_t *new)
1188 {
1189 	kmutex_t *old;
1190 
1191 	KASSERT(mutex_owned(l->l_mutex));
1192 
1193 	old = l->l_mutex;
1194 	if (old != new) {
1195 		mutex_spin_enter(new);
1196 		l->l_mutex = new;
1197 		mutex_spin_exit(old);
1198 	}
1199 }
1200 
1201 int
1202 lwp_trylock(struct lwp *l)
1203 {
1204 	kmutex_t *old;
1205 
1206 	for (;;) {
1207 		if (!mutex_tryenter(old = l->l_mutex))
1208 			return 0;
1209 		if (__predict_true(l->l_mutex == old))
1210 			return 1;
1211 		mutex_spin_exit(old);
1212 	}
1213 }
1214 
1215 u_int
1216 lwp_unsleep(lwp_t *l, bool cleanup)
1217 {
1218 
1219 	KASSERT(mutex_owned(l->l_mutex));
1220 
1221 	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
1222 }
1223 
1224 
1225 /*
1226  * Handle exceptions for mi_userret().  Called if any of the flags in
1227  * the LW_USERRET mask is set.
1228  */
1229 void
1230 lwp_userret(struct lwp *l)
1231 {
1232 	struct proc *p;
1233 	void (*hook)(void);
1234 	int sig;
1235 
1236 	KASSERT(l == curlwp);
1237 	KASSERT(l->l_stat == LSONPROC);
1238 	p = l->l_proc;
1239 
1240 #ifndef __HAVE_FAST_SOFTINTS
1241 	/* Run pending soft interrupts. */
1242 	if (l->l_cpu->ci_data.cpu_softints != 0)
1243 		softint_overlay();
1244 #endif
1245 
1246 #ifdef KERN_SA
1247 	/* Generate UNBLOCKED upcall if needed */
1248 	if (l->l_flag & LW_SA_BLOCKING) {
1249 		sa_unblock_userret(l);
1250 		/* NOTREACHED */
1251 	}
1252 #endif
1253 
1254 	/*
1255 	 * It should be safe to do this read unlocked on a multiprocessor
1256  * system.
1257 	 *
1258 	 * LW_SA_UPCALL will be handled after the while() loop, so don't
1259 	 * consider it now.
1260 	 */
1261 	while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
1262 		/*
1263 		 * Process pending signals first, unless the process
1264 		 * is dumping core or exiting, where we will instead
1265 		 * enter the LW_WSUSPEND case below.
1266 		 */
1267 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1268 		    LW_PENDSIG) {
1269 			mutex_enter(p->p_lock);
1270 			while ((sig = issignal(l)) != 0)
1271 				postsig(sig);
1272 			mutex_exit(p->p_lock);
1273 		}
1274 
1275 		/*
1276 		 * Core-dump or suspend pending.
1277 		 *
1278 		 * In case of core dump, suspend ourselves, so that the
1279 		 * kernel stack and therefore the userland registers saved
1280 		 * in the trapframe are around for coredump() to write them
1281 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
1282 		 * will write the core file out once all other LWPs are
1283 		 * suspended.
1284 		 */
1285 		if ((l->l_flag & LW_WSUSPEND) != 0) {
1286 			mutex_enter(p->p_lock);
1287 			p->p_nrlwps--;
1288 			cv_broadcast(&p->p_lwpcv);
1289 			lwp_lock(l);
1290 			l->l_stat = LSSUSPENDED;
1291 			lwp_unlock(l);
1292 			mutex_exit(p->p_lock);
1293 			lwp_lock(l);
1294 			mi_switch(l);
1295 		}
1296 
1297 		/* Process is exiting. */
1298 		if ((l->l_flag & LW_WEXIT) != 0) {
1299 			lwp_exit(l);
1300 			KASSERT(0);
1301 			/* NOTREACHED */
1302 		}
1303 
1304 		/* Call userret hook; used by Linux emulation. */
1305 		if ((l->l_flag & LW_WUSERRET) != 0) {
1306 			lwp_lock(l);
1307 			l->l_flag &= ~LW_WUSERRET;
1308 			lwp_unlock(l);
1309 			hook = p->p_userret;
1310 			p->p_userret = NULL;
1311 			(*hook)();
1312 		}
1313 	}
1314 
1315 #ifdef KERN_SA
1316 	/*
1317 	 * Timer events are handled specially.  We only try once to deliver
1318  * pending timer upcalls; if it fails, we can try again on the next
1319 	 * loop around.  If we need to re-enter lwp_userret(), MD code will
1320 	 * bounce us back here through the trap path after we return.
1321 	 */
1322 	if (p->p_timerpend)
1323 		timerupcall(l);
1324 	if (l->l_flag & LW_SA_UPCALL)
1325 		sa_upcall_userret(l);
1326 #endif /* KERN_SA */
1327 }
1328 
1329 /*
1330  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1331  */
1332 void
1333 lwp_need_userret(struct lwp *l)
1334 {
1335 	KASSERT(lwp_locked(l, NULL));
1336 
1337 	/*
1338 	 * Since the tests in lwp_userret() are done unlocked, make sure
1339 	 * that the condition will be seen before forcing the LWP to enter
1340 	 * kernel mode.
1341 	 */
1342 	membar_producer();
1343 	cpu_signotify(l);
1344 }
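
/*
 * Editor's example (a hedged sketch; compare the LSRUN/LSONPROC case in
 * lwp_suspend() above): a caller posts a flag for the target and then
 * forces it through lwp_userret():
 *
 *	lwp_lock(t);
 *	t->l_flag |= LW_WEXIT;
 *	lwp_need_userret(t);
 *	lwp_unlock(t);
 */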
1345 
1346 /*
1347  * Add one reference to an LWP.  This will prevent the LWP from
1348  * exiting, thus keeping the lwp structure and PCB around to inspect.
1349  */
1350 void
1351 lwp_addref(struct lwp *l)
1352 {
1353 
1354 	KASSERT(mutex_owned(l->l_proc->p_lock));
1355 	KASSERT(l->l_stat != LSZOMB);
1356 	KASSERT(l->l_refcnt != 0);
1357 
1358 	l->l_refcnt++;
1359 }
1360 
1361 /*
1362  * Remove one reference to an LWP.  If this is the last reference,
1363  * then we must finalize the LWP's death.
1364  */
1365 void
1366 lwp_delref(struct lwp *l)
1367 {
1368 	struct proc *p = l->l_proc;
1369 
1370 	mutex_enter(p->p_lock);
1371 	KASSERT(l->l_stat != LSZOMB);
1372 	KASSERT(l->l_refcnt > 0);
1373 	if (--l->l_refcnt == 0)
1374 		cv_broadcast(&p->p_lwpcv);
1375 	mutex_exit(p->p_lock);
1376 }
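
/*
 * Editor's example (a sketch of the procfs-style pattern these
 * functions exist for): hold a reference while inspecting an LWP, so
 * that p_lock need not stay held throughout:
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	... inspect the LWP's PCB, stack, etc. ...
 *	lwp_delref(l);
 */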
1377 
1378 /*
1379  * Drain all references to the current LWP.
1380  */
1381 void
1382 lwp_drainrefs(struct lwp *l)
1383 {
1384 	struct proc *p = l->l_proc;
1385 
1386 	KASSERT(mutex_owned(p->p_lock));
1387 	KASSERT(l->l_refcnt != 0);
1388 
1389 	l->l_refcnt--;
1390 	while (l->l_refcnt != 0)
1391 		cv_wait(&p->p_lwpcv, p->p_lock);
1392 }
1393 
1394 /*
1395  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
1396  * be held.
1397  */
1398 bool
1399 lwp_alive(lwp_t *l)
1400 {
1401 
1402 	KASSERT(mutex_owned(l->l_proc->p_lock));
1403 
1404 	switch (l->l_stat) {
1405 	case LSSLEEP:
1406 	case LSRUN:
1407 	case LSONPROC:
1408 	case LSSTOP:
1409 	case LSSUSPENDED:
1410 		return true;
1411 	default:
1412 		return false;
1413 	}
1414 }
1415 
1416 /*
1417  * Return first live LWP in the process.
1418  */
1419 lwp_t *
1420 lwp_find_first(proc_t *p)
1421 {
1422 	lwp_t *l;
1423 
1424 	KASSERT(mutex_owned(p->p_lock));
1425 
1426 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1427 		if (lwp_alive(l)) {
1428 			return l;
1429 		}
1430 	}
1431 
1432 	return NULL;
1433 }
1434 
1435 /*
1436  * lwp_specific_key_create --
1437  *	Create a key for subsystem lwp-specific data.
1438  */
1439 int
1440 lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
1441 {
1442 
1443 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
1444 }
1445 
1446 /*
1447  * lwp_specific_key_delete --
1448  *	Delete a key for subsystem lwp-specific data.
1449  */
1450 void
1451 lwp_specific_key_delete(specificdata_key_t key)
1452 {
1453 
1454 	specificdata_key_delete(lwp_specificdata_domain, key);
1455 }
1456 
1457 /*
1458  * lwp_initspecific --
1459  *	Initialize an LWP's specificdata container.
1460  */
1461 void
1462 lwp_initspecific(struct lwp *l)
1463 {
1464 	int error;
1465 
1466 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
1467 	KASSERT(error == 0);
1468 }
1469 
1470 /*
1471  * lwp_finispecific --
1472  *	Finalize an LWP's specificdata container.
1473  */
1474 void
1475 lwp_finispecific(struct lwp *l)
1476 {
1477 
1478 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
1479 }
1480 
1481 /*
1482  * lwp_getspecific --
1483  *	Return lwp-specific data corresponding to the specified key.
1484  *
1485  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
1486  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
1487  * LWP's specific data, care must be taken to ensure that doing so
1488  *	would not cause internal data structure inconsistency (i.e. caller
1489  *	can guarantee that the target LWP is not inside an lwp_getspecific()
1490  *	or lwp_setspecific() call).
1491  */
1492 void *
1493 lwp_getspecific(specificdata_key_t key)
1494 {
1495 
1496 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1497 						  &curlwp->l_specdataref, key));
1498 }
1499 
1500 void *
1501 _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
1502 {
1503 
1504 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1505 						  &l->l_specdataref, key));
1506 }
1507 
1508 /*
1509  * lwp_setspecific --
1510  *	Set lwp-specific data corresponding to the specified key.
1511  */
1512 void
1513 lwp_setspecific(specificdata_key_t key, void *data)
1514 {
1515 
1516 	specificdata_setspecific(lwp_specificdata_domain,
1517 				 &curlwp->l_specdataref, key, data);
1518 }
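
/*
 * Editor's example (a hedged sketch): a subsystem creates a key once at
 * initialization, then attaches and retrieves its own per-LWP data.
 * The "foo" names are made up for illustration:
 *
 *	static specificdata_key_t foo_key;
 *
 *	error = lwp_specific_key_create(&foo_key, foo_dtor);
 *	...
 *	lwp_setspecific(foo_key, data);
 *	data = lwp_getspecific(foo_key);
 */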
1519 
1520 /*
1521  * Allocate a new lwpctl structure for a user LWP.
1522  */
1523 int
1524 lwp_ctl_alloc(vaddr_t *uaddr)
1525 {
1526 	lcproc_t *lp;
1527 	u_int bit, i, offset;
1528 	struct uvm_object *uao;
1529 	int error;
1530 	lcpage_t *lcp;
1531 	proc_t *p;
1532 	lwp_t *l;
1533 
1534 	l = curlwp;
1535 	p = l->l_proc;
1536 
1537 	if (l->l_lcpage != NULL) {
1538 		lcp = l->l_lcpage;
1539 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1540 		return (EINVAL);
1541 	}
1542 
1543 	/* First time around, allocate header structure for the process. */
1544 	if ((lp = p->p_lwpctl) == NULL) {
1545 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1546 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1547 		lp->lp_uao = NULL;
1548 		TAILQ_INIT(&lp->lp_pages);
1549 		mutex_enter(p->p_lock);
1550 		if (p->p_lwpctl == NULL) {
1551 			p->p_lwpctl = lp;
1552 			mutex_exit(p->p_lock);
1553 		} else {
1554 			mutex_exit(p->p_lock);
1555 			mutex_destroy(&lp->lp_lock);
1556 			kmem_free(lp, sizeof(*lp));
1557 			lp = p->p_lwpctl;
1558 		}
1559 	}
1560 
1561 	/*
1562 	 * Set up an anonymous memory region to hold the shared pages.
1563 	 * Map them into the process' address space.  The user vmspace
1564 	 * gets the first reference on the UAO.
1565 	 */
1566 	mutex_enter(&lp->lp_lock);
1567 	if (lp->lp_uao == NULL) {
1568 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1569 		lp->lp_cur = 0;
1570 		lp->lp_max = LWPCTL_UAREA_SZ;
1571 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1572 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
1573 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1574 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1575 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1576 		if (error != 0) {
1577 			uao_detach(lp->lp_uao);
1578 			lp->lp_uao = NULL;
1579 			mutex_exit(&lp->lp_lock);
1580 			return error;
1581 		}
1582 	}
1583 
1584 	/* Get a free block and allocate for this LWP. */
1585 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1586 		if (lcp->lcp_nfree != 0)
1587 			break;
1588 	}
1589 	if (lcp == NULL) {
1590 		/* Nothing available - try to set up a free page. */
1591 		if (lp->lp_cur == lp->lp_max) {
1592 			mutex_exit(&lp->lp_lock);
1593 			return ENOMEM;
1594 		}
1595 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1596 		if (lcp == NULL) {
1597 			mutex_exit(&lp->lp_lock);
1598 			return ENOMEM;
1599 		}
1600 		/*
1601 		 * Wire the next page down in kernel space.  Since this
1602 		 * is a new mapping, we must add a reference.
1603 		 */
1604 		uao = lp->lp_uao;
1605 		(*uao->pgops->pgo_reference)(uao);
1606 		lcp->lcp_kaddr = vm_map_min(kernel_map);
1607 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1608 		    uao, lp->lp_cur, PAGE_SIZE,
1609 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1610 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1611 		if (error != 0) {
1612 			mutex_exit(&lp->lp_lock);
1613 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1614 			(*uao->pgops->pgo_detach)(uao);
1615 			return error;
1616 		}
1617 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1618 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1619 		if (error != 0) {
1620 			mutex_exit(&lp->lp_lock);
1621 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
1622 			    lcp->lcp_kaddr + PAGE_SIZE);
1623 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1624 			return error;
1625 		}
1626 		/* Prepare the page descriptor and link into the list. */
1627 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1628 		lp->lp_cur += PAGE_SIZE;
1629 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
1630 		lcp->lcp_rotor = 0;
1631 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1632 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1633 	}
1634 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1635 		if (++i >= LWPCTL_BITMAP_ENTRIES)
1636 			i = 0;
1637 	}
1638 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
1639 	lcp->lcp_bitmap[i] ^= (1 << bit);
1640 	lcp->lcp_rotor = i;
1641 	lcp->lcp_nfree--;
1642 	l->l_lcpage = lcp;
1643 	offset = (i << 5) + bit;
1644 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1645 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1646 	mutex_exit(&lp->lp_lock);
1647 
1648 	KPREEMPT_DISABLE(l);
1649 	l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1650 	KPREEMPT_ENABLE(l);
1651 
1652 	return 0;
1653 }
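
/*
 * Editor's example (a hedged sketch of the system call side, loosely
 * modeled on sys__lwp_ctl(); exact argument names are assumptions):
 * allocate the shared block and hand its user-space address back:
 *
 *	vaddr_t uaddr;
 *	lwpctl_t *ptr;
 *
 *	if ((error = lwp_ctl_alloc(&uaddr)) != 0)
 *		return error;
 *	ptr = (lwpctl_t *)uaddr;
 *	error = copyout(&ptr, SCARG(uap, address), sizeof(ptr));
 */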
1654 
1655 /*
1656  * Free an lwpctl structure back to the per-process list.
1657  */
1658 void
1659 lwp_ctl_free(lwp_t *l)
1660 {
1661 	lcproc_t *lp;
1662 	lcpage_t *lcp;
1663 	u_int map, offset;
1664 
1665 	lp = l->l_proc->p_lwpctl;
1666 	KASSERT(lp != NULL);
1667 
1668 	lcp = l->l_lcpage;
1669 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1670 	KASSERT(offset < LWPCTL_PER_PAGE);
1671 
1672 	mutex_enter(&lp->lp_lock);
1673 	lcp->lcp_nfree++;
1674 	map = offset >> 5;
1675 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
1676 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1677 		lcp->lcp_rotor = map;
1678 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1679 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1680 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1681 	}
1682 	mutex_exit(&lp->lp_lock);
1683 }
1684 
1685 /*
1686  * Process is exiting; tear down lwpctl state.  This can only be safely
1687  * called by the last LWP in the process.
1688  */
1689 void
1690 lwp_ctl_exit(void)
1691 {
1692 	lcpage_t *lcp, *next;
1693 	lcproc_t *lp;
1694 	proc_t *p;
1695 	lwp_t *l;
1696 
1697 	l = curlwp;
1698 	l->l_lwpctl = NULL;
1699 	l->l_lcpage = NULL;
1700 	p = l->l_proc;
1701 	lp = p->p_lwpctl;
1702 
1703 	KASSERT(lp != NULL);
1704 	KASSERT(p->p_nlwps == 1);
1705 
1706 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
1707 		next = TAILQ_NEXT(lcp, lcp_chain);
1708 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
1709 		    lcp->lcp_kaddr + PAGE_SIZE);
1710 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1711 	}
1712 
1713 	if (lp->lp_uao != NULL) {
1714 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
1715 		    lp->lp_uva + LWPCTL_UAREA_SZ);
1716 	}
1717 
1718 	mutex_destroy(&lp->lp_lock);
1719 	kmem_free(lp, sizeof(*lp));
1720 	p->p_lwpctl = NULL;
1721 }
1722 
1723 #if defined(DDB)
1724 void
1725 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1726 {
1727 	lwp_t *l;
1728 
1729 	LIST_FOREACH(l, &alllwp, l_list) {
1730 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
1731 
1732 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
1733 			continue;
1734 		}
1735 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
1736 		    (void *)addr, (void *)stack,
1737 		    (size_t)(addr - stack), l);
1738 	}
1739 }
1740 #endif /* defined(DDB) */
1741