1 /*	$NetBSD: kern_lwp.c,v 1.136 2009/10/27 02:58:28 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams, and Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	Lightweight processes (LWPs) are the basic unit or thread of
36  *	execution within the kernel.  The core state of an LWP is described
37  *	by "struct lwp", also known as lwp_t.
38  *
39  *	Each LWP is contained within a process (described by "struct proc").
40  *	Every process contains at least one LWP, but may contain more.  The
41  *	process describes attributes shared among all of its LWPs such as a
42  *	private address space, global execution state (stopped, active,
43  *	zombie, ...), signal disposition and so on.  On a multiprocessor
44  *	machine, multiple LWPs may be executing concurrently in the kernel.
45  *
46  * Execution states
47  *
48  *	At any given time, an LWP has overall state that is described by
49  *	lwp::l_stat.  The states are broken into two sets below.  The first
50  *	set is guaranteed to represent the absolute, current state of the
51  *	LWP:
52  *
53  *	LSONPROC
54  *
55  *		On processor: the LWP is executing on a CPU, either in the
56  *		kernel or in user space.
57  *
58  *	LSRUN
59  *
60  *		Runnable: the LWP is parked on a run queue, and may soon be
61  *		chosen to run by an idle processor, or by a processor that
62  *		has been asked to preempt a currently running but lower
63  *		priority LWP.
64  *
65  *	LSIDL
66  *
67  *		Idle: the LWP has been created but has not yet executed,
68  *		or it has ceased executing a unit of work and is waiting
69  *		to be started again.
70  *
71  *	LSSUSPENDED:
72  *
73  *		Suspended: the LWP has had its execution suspended by
74  *		another LWP in the same process using the _lwp_suspend()
75  *		system call.  User-level LWPs also enter the suspended
76  *		state when the system is shutting down.
77  *
78  *	The second set represents a "statement of intent" on behalf of the
79  *	LWP.  The LWP may in fact be executing on a processor, or may be
80  *	sleeping or idle.  It is expected to take the necessary action to
81  *	stop executing or become "running" again within a short timeframe.
82  *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
83  *	Importantly, it indicates that its state is tied to a CPU.
84  *
85  *	LSZOMB:
86  *
87  *		Dead or dying: the LWP has released most of its resources
88  *		and is about to switch away into oblivion, or has already
89  *		switched away.  When it switches away, its few remaining
90  *		resources can be collected.
91  *
92  *	LSSLEEP:
93  *
94  *		Sleeping: the LWP has entered itself onto a sleep queue, and
95  *		has switched away or will switch away shortly to allow other
96  *		LWPs to run on the CPU.
97  *
98  *	LSSTOP:
99  *
100  *		Stopped: the LWP has been stopped as a result of a job
101  *		control signal, or as a result of the ptrace() interface.
102  *
103  *		Stopped LWPs may run briefly within the kernel to handle
104  *		signals that they receive, but will not return to user space
105  *		until their process' state is changed away from stopped.
106  *
107  *		Individual LWPs within a process cannot be stopped
108  *		selectively: all actions that can stop or continue LWPs
109  *		occur at the process level.
110  *
111  * State transitions
112  *
113  *	Note that the LSSTOP state may only be set when returning to
114  *	user space in userret(), or when sleeping interruptibly.  The
115  *	LSSUSPENDED state may only be set in userret().  Before setting
116  *	those states, we try to ensure that the LWPs will release all
117  *	locks that they hold, and at a minimum try to ensure that the
118  *	LWP can be set runnable again by a signal.
119  *
120  *	LWPs may transition states in the following ways:
121  *
122  *	 RUN -------> ONPROC		ONPROC -----> RUN
123  *		    				    > SLEEP
124  *		    				    > STOPPED
125  *						    > SUSPENDED
126  *						    > ZOMB
127  *						    > IDL (special cases)
128  *
129  *	 STOPPED ---> RUN		SUSPENDED --> RUN
130  *	            > SLEEP
131  *
132  *	 SLEEP -----> ONPROC		IDL --------> RUN
133  *		    > RUN			    > SUSPENDED
134  *		    > STOPPED			    > STOPPED
135  *						    > ONPROC (special cases)
136  *
137  *	Some state transitions are only possible with kernel threads (eg
138  *	ONPROC -> IDL) and happen under tightly controlled circumstances
139  *	free of unwanted side effects.
140  *
141  * Migration
142  *
143  *	Migration of threads from one CPU to another may be performed
144  *	internally by the scheduler via the sched_takecpu() or
145  *	sched_catchlwp() functions.  The universal lwp_migrate() function
146  *	should be used for any other cases.  Subsystems must be aware
147  *	that the CPU of an LWP may change while the LWP is not locked.
148  *
149  * Locking
150  *
151  *	The majority of fields in 'struct lwp' are covered by a single,
152  *	general spin lock pointed to by lwp::l_mutex.  The locks covering
153  *	each field are documented in sys/lwp.h.
154  *
155  *	State transitions must be made with the LWP's general lock held,
156  *	and may cause the LWP's lock pointer to change. Manipulation of
157  *	the general lock is not performed directly, but through calls to
158  *	lwp_lock(), lwp_relock() and similar.
159  *
160  *	States and their associated locks:
161  *
162  *	LSONPROC, LSZOMB:
163  *
164  *		Always covered by spc_lwplock, which protects running LWPs.
165  *		This is a per-CPU lock and matches lwp::l_cpu.
166  *
167  *	LSIDL, LSRUN:
168  *
169  *		Always covered by spc_mutex, which protects the run queues.
170  *		This is a per-CPU lock and matches lwp::l_cpu.
171  *
172  *	LSSLEEP:
173  *
174  *		Covered by a lock associated with the sleep queue that the
175  *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
176  *
177  *	LSSTOP, LSSUSPENDED:
178  *
179  *		If the LWP was previously sleeping (l_wchan != NULL), then
180  *		l_mutex references the sleep queue lock.  If the LWP was
181  *		runnable or on the CPU when halted, or has been removed from
182  *		the sleep queue since halted, then the lock is spc_lwplock.
183  *
184  *	The lock order is as follows:
185  *
186  *		spc::spc_lwplock ->
187  *		    sleeptab::st_mutex ->
188  *			tschain_t::tc_mutex ->
189  *			    spc::spc_mutex
190  *
191  *	Each process has a scheduler state lock (proc::p_lock), and a
192  *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
193  *	so on.  When an LWP is to be entered into or removed from one of the
194  *	following states, p_lock must be held and the process wide counters
195  *	adjusted:
196  *
197  *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
198  *
199  *	(But not always for kernel threads.  There are some special cases
200  *	as mentioned above.  See kern_softint.c.)
201  *
202  *	Note that an LWP is considered running or likely to run soon if in
203  *	one of the following states.  This affects the value of p_nrlwps:
204  *
205  *		LSRUN, LSONPROC, LSSLEEP
206  *
207  *	p_lock does not need to be held when transitioning among these
208  *	three states, hence p_lock is rarely taken for state transitions.
209  */
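
/*
 * As a concrete illustration of the locking rules above, a minimal
 * sketch of the canonical pattern for examining an LWP's state from a
 * hypothetical caller: the LWP's general lock must be held while
 * l_stat is examined, and the lock pointer may change across state
 * transitions, which lwp_lock()/lwp_unlock() handle transparently.
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSSLEEP) {
 *		... l->l_mutex is now the sleep queue lock ...
 *	}
 *	lwp_unlock(l);
 */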
210 
211 #include <sys/cdefs.h>
212 __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.136 2009/10/27 02:58:28 rmind Exp $");
213 
214 #include "opt_ddb.h"
215 #include "opt_lockdebug.h"
216 #include "opt_sa.h"
217 
218 #define _LWP_API_PRIVATE
219 
220 #include <sys/param.h>
221 #include <sys/systm.h>
222 #include <sys/cpu.h>
223 #include <sys/pool.h>
224 #include <sys/proc.h>
225 #include <sys/sa.h>
226 #include <sys/savar.h>
227 #include <sys/syscallargs.h>
228 #include <sys/syscall_stats.h>
229 #include <sys/kauth.h>
230 #include <sys/sleepq.h>
231 #include <sys/user.h>
232 #include <sys/lockdebug.h>
233 #include <sys/kmem.h>
234 #include <sys/pset.h>
235 #include <sys/intr.h>
236 #include <sys/lwpctl.h>
237 #include <sys/atomic.h>
238 #include <sys/filedesc.h>
239 
240 #include <uvm/uvm_extern.h>
241 #include <uvm/uvm_object.h>
242 
243 struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);
244 
245 struct pool lwp_uc_pool;
246 
247 static pool_cache_t lwp_cache;
248 static specificdata_domain_t lwp_specificdata_domain;
249 
250 void
251 lwpinit(void)
252 {
253 
254 	pool_init(&lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
255 	    &pool_allocator_nointr, IPL_NONE);
256 	lwp_specificdata_domain = specificdata_domain_create();
257 	KASSERT(lwp_specificdata_domain != NULL);
258 	lwp_sys_init();
259 	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
260 	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
261 }
262 
263 /*
264  * Set an LWP suspended.
265  *
266  * Must be called with p_lock held, and the LWP locked.  Will unlock the
267  * LWP before return.
268  */
269 int
270 lwp_suspend(struct lwp *curl, struct lwp *t)
271 {
272 	int error;
273 
274 	KASSERT(mutex_owned(t->l_proc->p_lock));
275 	KASSERT(lwp_locked(t, NULL));
276 
277 	KASSERT(curl != t || curl->l_stat == LSONPROC);
278 
279 	/*
280 	 * If the current LWP has been told to exit, we must not suspend anyone
281 	 * else or deadlock could occur.  We won't return to userspace.
282 	 */
283 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
284 		lwp_unlock(t);
285 		return (EDEADLK);
286 	}
287 
288 	error = 0;
289 
290 	switch (t->l_stat) {
291 	case LSRUN:
292 	case LSONPROC:
293 		t->l_flag |= LW_WSUSPEND;
294 		lwp_need_userret(t);
295 		lwp_unlock(t);
296 		break;
297 
298 	case LSSLEEP:
299 		t->l_flag |= LW_WSUSPEND;
300 
301 		/*
302 		 * Kick the LWP and try to get it to the kernel boundary
303 		 * so that it will release any locks that it holds.
304 		 * setrunnable() will release the lock.
305 		 */
306 		if ((t->l_flag & LW_SINTR) != 0)
307 			setrunnable(t);
308 		else
309 			lwp_unlock(t);
310 		break;
311 
312 	case LSSUSPENDED:
313 		lwp_unlock(t);
314 		break;
315 
316 	case LSSTOP:
317 		t->l_flag |= LW_WSUSPEND;
318 		setrunnable(t);
319 		break;
320 
321 	case LSIDL:
322 	case LSZOMB:
323 		error = EINTR; /* It's what Solaris does..... */
324 		lwp_unlock(t);
325 		break;
326 	}
327 
328 	return (error);
329 }
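
/*
 * A minimal sketch of the expected caller pattern for lwp_suspend()
 * (hypothetical caller; compare the _lwp_suspend() system call
 * wrapper, which lives elsewhere):
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);		(unlocks t)
 *	mutex_exit(p->p_lock);
 */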
330 
331 /*
332  * Restart a suspended LWP.
333  *
334  * Must be called with p_lock held, and the LWP locked.  Will unlock the
335  * LWP before return.
336  */
337 void
338 lwp_continue(struct lwp *l)
339 {
340 
341 	KASSERT(mutex_owned(l->l_proc->p_lock));
342 	KASSERT(lwp_locked(l, NULL));
343 
344 	/* If rebooting or not suspended, then just bail out. */
345 	if ((l->l_flag & LW_WREBOOT) != 0) {
346 		lwp_unlock(l);
347 		return;
348 	}
349 
350 	l->l_flag &= ~LW_WSUSPEND;
351 
352 	if (l->l_stat != LSSUSPENDED) {
353 		lwp_unlock(l);
354 		return;
355 	}
356 
357 	/* setrunnable() will release the lock. */
358 	setrunnable(l);
359 }
360 
361 /*
362  * Wait for an LWP within the current process to exit.  If 'lid' is
363  * non-zero, we are waiting for a specific LWP.
364  *
365  * Must be called with p->p_lock held.
366  */
367 int
368 lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
369 {
370 	struct proc *p = l->l_proc;
371 	struct lwp *l2;
372 	int nfound, error;
373 	lwpid_t curlid;
374 	bool exiting;
375 
376 	KASSERT(mutex_owned(p->p_lock));
377 
378 	p->p_nlwpwait++;
379 	l->l_waitingfor = lid;
380 	curlid = l->l_lid;
381 	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
382 
383 	for (;;) {
384 		/*
385 		 * Avoid a race between exit1() and sigexit(): if the
386 		 * process is dumping core, then we need to bail out: call
387 		 * into lwp_userret() where we will be suspended until the
388 		 * deed is done.
389 		 */
390 		if ((p->p_sflag & PS_WCORE) != 0) {
391 			mutex_exit(p->p_lock);
392 			lwp_userret(l);
393 #ifdef DIAGNOSTIC
394 			panic("lwp_wait1");
395 #endif
396 			/* NOTREACHED */
397 		}
398 
399 		/*
400 		 * First off, drain any detached LWP that is waiting to be
401 		 * reaped.
402 		 */
403 		while ((l2 = p->p_zomblwp) != NULL) {
404 			p->p_zomblwp = NULL;
405 			lwp_free(l2, false, false);/* releases proc mutex */
406 			mutex_enter(p->p_lock);
407 		}
408 
409 		/*
410 		 * Now look for an LWP to collect.  If the whole process is
411 		 * exiting, count detached LWPs as eligible to be collected,
412 		 * but don't drain them here.
413 		 */
414 		nfound = 0;
415 		error = 0;
416 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
417 			/*
418 			 * If a specific wait and the target is waiting on
419 			 * us, then avoid deadlock.  This also traps LWPs
420 			 * that try to wait on themselves.
421 			 *
422 			 * Note that this does not handle more complicated
423 			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
424 			 * can still be killed so it is not a major problem.
425 			 */
426 			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
427 				error = EDEADLK;
428 				break;
429 			}
430 			if (l2 == l)
431 				continue;
432 			if ((l2->l_prflag & LPR_DETACHED) != 0) {
433 				nfound += exiting;
434 				continue;
435 			}
436 			if (lid != 0) {
437 				if (l2->l_lid != lid)
438 					continue;
439 				/*
440 				 * Mark this LWP as the first waiter, if there
441 				 * is no other.
442 				 */
443 				if (l2->l_waiter == 0)
444 					l2->l_waiter = curlid;
445 			} else if (l2->l_waiter != 0) {
446 				/*
447 				 * It already has a waiter - so don't
448 				 * collect it.  If the waiter doesn't
449 				 * grab it we'll get another chance
450 				 * later.
451 				 */
452 				nfound++;
453 				continue;
454 			}
455 			nfound++;
456 
457 			/* No need to lock the LWP in order to see LSZOMB. */
458 			if (l2->l_stat != LSZOMB)
459 				continue;
460 
461 			/*
462 			 * We're no longer waiting.  Reset the "first waiter"
463 			 * pointer on the target, in case it was us.
464 			 */
465 			l->l_waitingfor = 0;
466 			l2->l_waiter = 0;
467 			p->p_nlwpwait--;
468 			if (departed)
469 				*departed = l2->l_lid;
470 			sched_lwp_collect(l2);
471 
472 			/* lwp_free() releases the proc lock. */
473 			lwp_free(l2, false, false);
474 			mutex_enter(p->p_lock);
475 			return 0;
476 		}
477 
478 		if (error != 0)
479 			break;
480 		if (nfound == 0) {
481 			error = ESRCH;
482 			break;
483 		}
484 
485 		/*
486 		 * The kernel is careful to ensure that it can not deadlock
487 		 * when exiting - just keep waiting.
488 		 */
489 		if (exiting) {
490 			KASSERT(p->p_nlwps > 1);
491 			cv_wait(&p->p_lwpcv, p->p_lock);
492 			continue;
493 		}
494 
495 		/*
496 		 * If all other LWPs are waiting for exits or suspends
497 		 * and the supply of zombies and potential zombies is
498 		 * exhausted, then we are about to deadlock.
499 		 *
500 		 * If the process is exiting (and this LWP is not the one
501 		 * that is coordinating the exit) then bail out now.
502 		 */
503 		if ((p->p_sflag & PS_WEXIT) != 0 ||
504 		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
505 			error = EDEADLK;
506 			break;
507 		}
508 
509 		/*
510 		 * Sit around and wait for something to happen.  We'll be
511 		 * awoken if any of the conditions examined change: if an
512 		 * LWP exits, is collected, or is detached.
513 		 */
514 		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
515 			break;
516 	}
517 
518 	/*
519 	 * We didn't find any LWPs to collect, we may have received a
520 	 * signal, or some other condition has caused us to bail out.
521 	 *
522 	 * If waiting on a specific LWP, clear the waiters marker: some
523 	 * other LWP may want it.  Then, kick all the remaining waiters
524 	 * so that they can re-check for zombies and for deadlock.
525 	 */
526 	if (lid != 0) {
527 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
528 			if (l2->l_lid == lid) {
529 				if (l2->l_waiter == curlid)
530 					l2->l_waiter = 0;
531 				break;
532 			}
533 		}
534 	}
535 	p->p_nlwpwait--;
536 	l->l_waitingfor = 0;
537 	cv_broadcast(&p->p_lwpcv);
538 
539 	return error;
540 }
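
/*
 * A sketch of how lwp_wait1() is expected to be entered (hypothetical
 * caller; the real _lwp_wait() system call wrapper lives elsewhere).
 * The proc lock is held across the call:
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait1(curlwp, lid, &departed_lid, 0);
 *	mutex_exit(p->p_lock);
 */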
541 
542 /*
543  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
544  * The new LWP is created in state LSIDL and must be set running,
545  * suspended, or stopped by the caller.
546  */
547 int
548 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
549 	   void *stack, size_t stacksize, void (*func)(void *), void *arg,
550 	   lwp_t **rnewlwpp, int sclass)
551 {
552 	struct lwp *l2, *isfree;
553 	turnstile_t *ts;
554 
555 	KASSERT(l1 == curlwp || l1->l_proc == &proc0);
556 
557 	/*
558 	 * First off, reap any detached LWP waiting to be collected.
559 	 * We can re-use its LWP structure and turnstile.
560 	 */
561 	isfree = NULL;
562 	if (p2->p_zomblwp != NULL) {
563 		mutex_enter(p2->p_lock);
564 		if ((isfree = p2->p_zomblwp) != NULL) {
565 			p2->p_zomblwp = NULL;
566 			lwp_free(isfree, true, false);/* releases proc mutex */
567 		} else
568 			mutex_exit(p2->p_lock);
569 	}
570 	if (isfree == NULL) {
571 		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
572 		memset(l2, 0, sizeof(*l2));
573 		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
574 		SLIST_INIT(&l2->l_pi_lenders);
575 	} else {
576 		l2 = isfree;
577 		ts = l2->l_ts;
578 		KASSERT(l2->l_inheritedprio == -1);
579 		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
580 		memset(l2, 0, sizeof(*l2));
581 		l2->l_ts = ts;
582 	}
583 
584 	l2->l_stat = LSIDL;
585 	l2->l_proc = p2;
586 	l2->l_refcnt = 1;
587 	l2->l_class = sclass;
588 
589 	/*
590 	 * If vfork(), we want the LWP to run fast and on the same CPU
591 	 * as its parent, so that it can reuse the VM context and cache
592 	 * footprint on the local CPU.
593 	 */
594 	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
595 	l2->l_kpribase = PRI_KERNEL;
596 	l2->l_priority = l1->l_priority;
597 	l2->l_inheritedprio = -1;
598 	l2->l_flag = 0;
599 	l2->l_pflag = LP_MPSAFE;
600 	TAILQ_INIT(&l2->l_ld_locks);
601 
602 	/*
603 	 * If not the first LWP in the process, grab a reference to the
604 	 * descriptor table.
605 	 */
606 	l2->l_fd = p2->p_fd;
607 	if (p2->p_nlwps != 0) {
608 		KASSERT(l1->l_proc == p2);
609 		fd_hold(l2);
610 	} else {
611 		KASSERT(l1->l_proc != p2);
612 	}
613 
614 	if (p2->p_flag & PK_SYSTEM) {
615 		/* Mark it as a system LWP. */
616 		l2->l_flag |= LW_SYSTEM;
617 	}
618 
619 	kpreempt_disable();
620 	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
621 	l2->l_cpu = l1->l_cpu;
622 	kpreempt_enable();
623 
624 	lwp_initspecific(l2);
625 	sched_lwp_fork(l1, l2);
626 	lwp_update_creds(l2);
627 	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
628 	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
629 	cv_init(&l2->l_sigcv, "sigwait");
630 	l2->l_syncobj = &sched_syncobj;
631 
632 	if (rnewlwpp != NULL)
633 		*rnewlwpp = l2;
634 
635 	l2->l_addr = UAREA_TO_USER(uaddr);
636 	uvm_lwp_fork(l1, l2, stack, stacksize, func,
637 	    (arg != NULL) ? arg : l2);
638 
639 	mutex_enter(p2->p_lock);
640 
641 	if ((flags & LWP_DETACHED) != 0) {
642 		l2->l_prflag = LPR_DETACHED;
643 		p2->p_ndlwps++;
644 	} else
645 		l2->l_prflag = 0;
646 
647 	l2->l_sigmask = l1->l_sigmask;
648 	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
649 	sigemptyset(&l2->l_sigpend.sp_set);
650 
651 	p2->p_nlwpid++;
652 	if (p2->p_nlwpid == 0)
653 		p2->p_nlwpid++;
654 	l2->l_lid = p2->p_nlwpid;
655 	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
656 	p2->p_nlwps++;
657 
658 	if ((p2->p_flag & PK_SYSTEM) == 0) {
659 		/* Inherit an affinity */
660 		if (l1->l_flag & LW_AFFINITY) {
661 			/*
662 			 * Note that we hold the state lock while inheriting
663 			 * the affinity to avoid race with sched_setaffinity().
664 			 */
665 			lwp_lock(l1);
666 			if (l1->l_flag & LW_AFFINITY) {
667 				kcpuset_use(l1->l_affinity);
668 				l2->l_affinity = l1->l_affinity;
669 				l2->l_flag |= LW_AFFINITY;
670 			}
671 			lwp_unlock(l1);
672 		}
673 		lwp_lock(l2);
674 		/* Inherit a processor-set */
675 		l2->l_psid = l1->l_psid;
676 		/* Look for a CPU to start */
677 		l2->l_cpu = sched_takecpu(l2);
678 		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
679 	}
680 	mutex_exit(p2->p_lock);
681 
682 	mutex_enter(proc_lock);
683 	LIST_INSERT_HEAD(&alllwp, l2, l_list);
684 	mutex_exit(proc_lock);
685 
686 	SYSCALL_TIME_LWP_INIT(l2);
687 
688 	if (p2->p_emul->e_lwp_fork)
689 		(*p2->p_emul->e_lwp_fork)(l1, l2);
690 
691 	return (0);
692 }
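
/*
 * An illustrative sketch of creating and starting an LWP (compare the
 * real callers in kern_fork.c and kern_kthread.c; the u-area
 * allocation step is elided since its interface varies):
 *
 *	... allocate the u-area, e.g. with uvm_uarea_alloc() ...
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, SCHED_OTHER);
 *	if (error == 0) {
 *		mutex_enter(p->p_lock);
 *		lwp_lock(l2);
 *		p->p_nrlwps++;
 *		l2->l_stat = LSRUN;
 *		sched_enqueue(l2, false);
 *		lwp_unlock(l2);
 *		mutex_exit(p->p_lock);
 *	}
 */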
693 
694 /*
695  * Called by MD code when a new LWP begins execution.  Must be called
696  * with the previous LWP locked (and hence at splsched), or, if there
697  * is no previous LWP, at splsched.
698  */
699 void
700 lwp_startup(struct lwp *prev, struct lwp *new)
701 {
702 
703 	KASSERT(kpreempt_disabled());
704 	if (prev != NULL) {
705 		/*
706 		 * Normalize the count of the spin-mutexes; it was
707 		 * increased in mi_switch().  Unmark the context switch
708 		 * state - it is finished for the previous LWP.
709 		 */
710 		curcpu()->ci_mtx_count++;
711 		membar_exit();
712 		prev->l_ctxswtch = 0;
713 	}
714 	KPREEMPT_DISABLE(new);
715 	spl0();
716 	pmap_activate(new);
717 	LOCKDEBUG_BARRIER(NULL, 0);
718 	KPREEMPT_ENABLE(new);
719 	if ((new->l_pflag & LP_MPSAFE) == 0) {
720 		KERNEL_LOCK(1, new);
721 	}
722 }
723 
724 /*
725  * Exit an LWP.
726  */
727 void
728 lwp_exit(struct lwp *l)
729 {
730 	struct proc *p = l->l_proc;
731 	struct lwp *l2;
732 	bool current;
733 
734 	current = (l == curlwp);
735 
736 	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
737 	KASSERT(p == curproc);
738 
739 	/*
740 	 * Verify that we hold no locks other than the kernel lock.
741 	 */
742 	LOCKDEBUG_BARRIER(&kernel_lock, 0);
743 
744 	/*
745 	 * If we are the last live LWP in a process, we need to exit the
746 	 * entire process.  We do so with an exit status of zero, because
747 	 * it's a "controlled" exit, and because that's what Solaris does.
748 	 *
749 	 * We are not quite a zombie yet, but for accounting purposes we
750 	 * must increment the count of zombies here.
751 	 *
752 	 * Note: the last LWP's specificdata will be deleted here.
753 	 */
754 	mutex_enter(p->p_lock);
755 	if (p->p_nlwps - p->p_nzlwps == 1) {
756 		KASSERT(current == true);
757 		/* XXXSMP kernel_lock not held */
758 		exit1(l, 0);
759 		/* NOTREACHED */
760 	}
761 	p->p_nzlwps++;
762 	mutex_exit(p->p_lock);
763 
764 	if (p->p_emul->e_lwp_exit)
765 		(*p->p_emul->e_lwp_exit)(l);
766 
767 	/* Drop filedesc reference. */
768 	fd_free();
769 
770 	/* Delete the specificdata while it's still safe to sleep. */
771 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
772 
773 	/*
774 	 * Release our cached credentials.
775 	 */
776 	kauth_cred_free(l->l_cred);
777 	callout_destroy(&l->l_timeout_ch);
778 
779 	/*
780 	 * Remove the LWP from the global list.
781 	 */
782 	mutex_enter(proc_lock);
783 	LIST_REMOVE(l, l_list);
784 	mutex_exit(proc_lock);
785 
786 	/*
787 	 * Get rid of all references to the LWP that others (e.g. procfs)
788 	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
789 	 * mark it waiting for collection in the proc structure.  Note that
790  * before we can do that, we need to free any other dead, detached
791 	 * LWP waiting to meet its maker.
792 	 */
793 	mutex_enter(p->p_lock);
794 	lwp_drainrefs(l);
795 
796 	if ((l->l_prflag & LPR_DETACHED) != 0) {
797 		while ((l2 = p->p_zomblwp) != NULL) {
798 			p->p_zomblwp = NULL;
799 			lwp_free(l2, false, false);/* releases proc mutex */
800 			mutex_enter(p->p_lock);
801 			l->l_refcnt++;
802 			lwp_drainrefs(l);
803 		}
804 		p->p_zomblwp = l;
805 	}
806 
807 	/*
808 	 * If we find a pending signal for the process and we have been
809  * asked to check for signals, then we lose: arrange to have
810 	 * all other LWPs in the process check for signals.
811 	 */
812 	if ((l->l_flag & LW_PENDSIG) != 0 &&
813 	    firstsig(&p->p_sigpend.sp_set) != 0) {
814 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
815 			lwp_lock(l2);
816 			l2->l_flag |= LW_PENDSIG;
817 			lwp_unlock(l2);
818 		}
819 	}
820 
821 	lwp_lock(l);
822 	l->l_stat = LSZOMB;
823 	if (l->l_name != NULL)
824 		strcpy(l->l_name, "(zombie)");
825 	if (l->l_flag & LW_AFFINITY) {
826 		l->l_flag &= ~LW_AFFINITY;
827 	} else {
828 		KASSERT(l->l_affinity == NULL);
829 	}
830 	lwp_unlock(l);
831 	p->p_nrlwps--;
832 	cv_broadcast(&p->p_lwpcv);
833 	if (l->l_lwpctl != NULL)
834 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
835 	mutex_exit(p->p_lock);
836 
837 	/* Safe without lock since LWP is in zombie state */
838 	if (l->l_affinity) {
839 		kcpuset_unuse(l->l_affinity, NULL);
840 		l->l_affinity = NULL;
841 	}
842 
843 	/*
844 	 * We can no longer block.  At this point, lwp_free() may already
845 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
846 	 *
847 	 * Free MD LWP resources.
848 	 */
849 	cpu_lwp_free(l, 0);
850 
851 	if (current) {
852 		pmap_deactivate(l);
853 
854 		/*
855 		 * Release the kernel lock, and switch away into
856 		 * oblivion.
857 		 */
858 #ifdef notyet
859 		/* XXXSMP hold in lwp_userret() */
860 		KERNEL_UNLOCK_LAST(l);
861 #else
862 		KERNEL_UNLOCK_ALL(l, NULL);
863 #endif
864 		lwp_exit_switchaway(l);
865 	}
866 }
867 
868 /*
869  * Free a dead LWP's remaining resources.
870  *
871  * XXXLWP limits.
872  */
873 void
874 lwp_free(struct lwp *l, bool recycle, bool last)
875 {
876 	struct proc *p = l->l_proc;
877 	struct rusage *ru;
878 	ksiginfoq_t kq;
879 
880 	KASSERT(l != curlwp);
881 
882 	/*
883 	 * If this was not the last LWP in the process, then adjust
884 	 * counters and unlock.
885 	 */
886 	if (!last) {
887 		/*
888 		 * Add the LWP's run time to the process' base value.
889 		 * This needs to coincide with coming off p_lwps.
890 		 */
891 		bintime_add(&p->p_rtime, &l->l_rtime);
892 		p->p_pctcpu += l->l_pctcpu;
893 		ru = &p->p_stats->p_ru;
894 		ruadd(ru, &l->l_ru);
895 		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
896 		ru->ru_nivcsw += l->l_nivcsw;
897 		LIST_REMOVE(l, l_sibling);
898 		p->p_nlwps--;
899 		p->p_nzlwps--;
900 		if ((l->l_prflag & LPR_DETACHED) != 0)
901 			p->p_ndlwps--;
902 
903 		/*
904 		 * Have any LWPs sleeping in lwp_wait() recheck for
905 		 * deadlock.
906 		 */
907 		cv_broadcast(&p->p_lwpcv);
908 		mutex_exit(p->p_lock);
909 	}
910 
911 #ifdef MULTIPROCESSOR
912 	/*
913 	 * In the unlikely event that the LWP is still on the CPU,
914 	 * then spin until it has switched away.  We need to release
915 	 * all locks to avoid deadlock against interrupt handlers on
916 	 * the target CPU.
917 	 */
918 	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
919 		int count;
920 		(void)count; /* XXXgcc */
921 		KERNEL_UNLOCK_ALL(curlwp, &count);
922 		while ((l->l_pflag & LP_RUNNING) != 0 ||
923 		    l->l_cpu->ci_curlwp == l)
924 			SPINLOCK_BACKOFF_HOOK;
925 		KERNEL_LOCK(count, curlwp);
926 	}
927 #endif
928 
929 	/*
930 	 * Destroy the LWP's remaining signal information.
931 	 */
932 	ksiginfo_queue_init(&kq);
933 	sigclear(&l->l_sigpend, NULL, &kq);
934 	ksiginfo_queue_drain(&kq);
935 	cv_destroy(&l->l_sigcv);
936 
937 	/*
938 	 * Free the LWP's turnstile and the LWP structure itself unless the
939 	 * caller wants to recycle them.  Also, free the scheduler specific
940 	 * data.
941 	 *
942 	 * We can't return turnstile0 to the pool (it didn't come from it),
943 	 * so if it comes up just drop it quietly and move on.
944 	 *
945 	 * We don't recycle the VM resources at this time.
946 	 */
947 	if (l->l_lwpctl != NULL)
948 		lwp_ctl_free(l);
949 
950 	if (!recycle && l->l_ts != &turnstile0)
951 		pool_cache_put(turnstile_cache, l->l_ts);
952 	if (l->l_name != NULL)
953 		kmem_free(l->l_name, MAXCOMLEN);
954 
955 	cpu_lwp_free2(l);
956 	uvm_lwp_exit(l);
957 
958 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
959 	KASSERT(l->l_inheritedprio == -1);
960 	if (!recycle)
961 		pool_cache_put(lwp_cache, l);
962 }
963 
964 /*
965  * Migrate the LWP to another CPU.  Unlocks the LWP.
966  */
967 void
968 lwp_migrate(lwp_t *l, struct cpu_info *tci)
969 {
970 	struct schedstate_percpu *tspc;
971 	int lstat = l->l_stat;
972 
973 	KASSERT(lwp_locked(l, NULL));
974 	KASSERT(tci != NULL);
975 
976 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
977 	if ((l->l_pflag & LP_RUNNING) != 0) {
978 		lstat = LSONPROC;
979 	}
980 
981 	/*
982 	 * A previous migration may still be pending; if so, simply
983 	 * update the destination CPU and return.
984 	 */
985 	if (l->l_target_cpu != NULL) {
986 		l->l_target_cpu = tci;
987 		lwp_unlock(l);
988 		return;
989 	}
990 
991 	/* Nothing to do if trying to migrate to the same CPU */
992 	if (l->l_cpu == tci) {
993 		lwp_unlock(l);
994 		return;
995 	}
996 
997 	KASSERT(l->l_target_cpu == NULL);
998 	tspc = &tci->ci_schedstate;
999 	switch (lstat) {
1000 	case LSRUN:
1001 		l->l_target_cpu = tci;
1002 		break;
1003 	case LSIDL:
1004 		l->l_cpu = tci;
1005 		lwp_unlock_to(l, tspc->spc_mutex);
1006 		return;
1007 	case LSSLEEP:
1008 		l->l_cpu = tci;
1009 		break;
1010 	case LSSTOP:
1011 	case LSSUSPENDED:
1012 		l->l_cpu = tci;
1013 		if (l->l_wchan == NULL) {
1014 			lwp_unlock_to(l, tspc->spc_lwplock);
1015 			return;
1016 		}
1017 		break;
1018 	case LSONPROC:
1019 		l->l_target_cpu = tci;
1020 		spc_lock(l->l_cpu);
1021 		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
1022 		spc_unlock(l->l_cpu);
1023 		break;
1024 	}
1025 	lwp_unlock(l);
1026 }
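
/*
 * Example use (a sketch; 'tci' is a hypothetical destination CPU
 * chosen by the caller):
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, tci);	(unlocks l)
 */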
1027 
1028 /*
1029  * Find an LWP within a process.  Arguments may be zero, in which case
1030  * the calling process and the first LWP in its list are used.
1031  * On success, the proc is returned locked.
1032  */
1033 struct lwp *
1034 lwp_find2(pid_t pid, lwpid_t lid)
1035 {
1036 	proc_t *p;
1037 	lwp_t *l;
1038 
1039 	/* Find the process */
1040 	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
1041 	if (p == NULL)
1042 		return NULL;
1043 	mutex_enter(p->p_lock);
1044 	if (pid != 0) {
1045 		/* Case of p_find */
1046 		mutex_exit(proc_lock);
1047 	}
1048 
1049 	/* Find the thread */
1050 	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
1051 	if (l == NULL) {
1052 		mutex_exit(p->p_lock);
1053 	}
1054 
1055 	return l;
1056 }
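
/*
 * Sketch of the matching unlock for a successful lwp_find2() call
 * (hypothetical caller):
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		... use l, protected by l->l_proc->p_lock ...
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */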
1057 
1058 /*
1059  * Look up a live LWP within the specified process, and return it locked.
1060  *
1061  * Must be called with p->p_lock held.
1062  */
1063 struct lwp *
1064 lwp_find(struct proc *p, int id)
1065 {
1066 	struct lwp *l;
1067 
1068 	KASSERT(mutex_owned(p->p_lock));
1069 
1070 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1071 		if (l->l_lid == id)
1072 			break;
1073 	}
1074 
1075 	/*
1076 	 * No need to lock - all of these conditions will
1077 	 * be visible with the process level mutex held.
1078 	 */
1079 	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1080 		l = NULL;
1081 
1082 	return l;
1083 }
1084 
1085 /*
1086  * Update an LWP's cached credentials to mirror the process' master copy.
1087  *
1088  * This happens early in the syscall path, on user trap, and on LWP
1089  * creation.  A long-running LWP can also voluntarily choose to update
1090  * its credentials by calling this routine.  This may be called from
1091  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1092  */
1093 void
1094 lwp_update_creds(struct lwp *l)
1095 {
1096 	kauth_cred_t oc;
1097 	struct proc *p;
1098 
1099 	p = l->l_proc;
1100 	oc = l->l_cred;
1101 
1102 	mutex_enter(p->p_lock);
1103 	kauth_cred_hold(p->p_cred);
1104 	l->l_cred = p->p_cred;
1105 	l->l_prflag &= ~LPR_CRMOD;
1106 	mutex_exit(p->p_lock);
1107 	if (oc != NULL)
1108 		kauth_cred_free(oc);
1109 }
1110 
1111 /*
1112  * Verify that an LWP is locked, and optionally verify that the lock matches
1113  * one we specify.
1114  */
1115 int
1116 lwp_locked(struct lwp *l, kmutex_t *mtx)
1117 {
1118 	kmutex_t *cur = l->l_mutex;
1119 
1120 	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1121 }
1122 
1123 /*
1124  * Lock an LWP.
1125  */
1126 kmutex_t *
1127 lwp_lock_retry(struct lwp *l, kmutex_t *old)
1128 {
1129 
1130 	/*
1131 	 * XXXgcc ignoring kmutex_t * volatile on i386
1132 	 *
1133 	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
1134 	 */
1135 #if 1
1136 	while (l->l_mutex != old) {
1137 #else
1138 	for (;;) {
1139 #endif
1140 		mutex_spin_exit(old);
1141 		old = l->l_mutex;
1142 		mutex_spin_enter(old);
1143 
1144 		/*
1145 		 * mutex_enter() will have posted a read barrier.  Re-test
1146 		 * l->l_mutex.  If it has changed, we need to try again.
1147 		 */
1148 #if 1
1149 	}
1150 #else
1151 	} while (__predict_false(l->l_mutex != old));
1152 #endif
1153 
1154 	return old;
1155 }
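
/*
 * For reference, the fast path that pairs with lwp_lock_retry() looks
 * roughly like this (a sketch; see the lwp_lock() definition in
 * sys/lwp.h for the authoritative version):
 *
 *	kmutex_t *old = l->l_mutex;
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))
 *		lwp_lock_retry(l, old);
 */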
1156 
1157 /*
1158  * Lend a new mutex to an LWP.  The old mutex must be held.
1159  */
1160 void
1161 lwp_setlock(struct lwp *l, kmutex_t *new)
1162 {
1163 
1164 	KASSERT(mutex_owned(l->l_mutex));
1165 
1166 	membar_exit();
1167 	l->l_mutex = new;
1168 }
1169 
1170 /*
1171  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
1172  * must be held.
1173  */
1174 void
1175 lwp_unlock_to(struct lwp *l, kmutex_t *new)
1176 {
1177 	kmutex_t *old;
1178 
1179 	KASSERT(mutex_owned(l->l_mutex));
1180 
1181 	old = l->l_mutex;
1182 	membar_exit();
1183 	l->l_mutex = new;
1184 	mutex_spin_exit(old);
1185 }
1186 
1187 /*
1188  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
1189  * locked.
1190  */
1191 void
1192 lwp_relock(struct lwp *l, kmutex_t *new)
1193 {
1194 	kmutex_t *old;
1195 
1196 	KASSERT(mutex_owned(l->l_mutex));
1197 
1198 	old = l->l_mutex;
1199 	if (old != new) {
1200 		mutex_spin_enter(new);
1201 		l->l_mutex = new;
1202 		mutex_spin_exit(old);
1203 	}
1204 }
1205 
1206 int
1207 lwp_trylock(struct lwp *l)
1208 {
1209 	kmutex_t *old;
1210 
1211 	for (;;) {
1212 		if (!mutex_tryenter(old = l->l_mutex))
1213 			return 0;
1214 		if (__predict_true(l->l_mutex == old))
1215 			return 1;
1216 		mutex_spin_exit(old);
1217 	}
1218 }
1219 
1220 void
1221 lwp_unsleep(lwp_t *l, bool cleanup)
1222 {
1223 
1224 	KASSERT(mutex_owned(l->l_mutex));
1225 	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
1226 }
1227 
1228 
1229 /*
1230  * Handle exceptions for mi_userret().  Called if any of the flags in
1231  * the LW_USERRET mask is set.
1232  */
1233 void
1234 lwp_userret(struct lwp *l)
1235 {
1236 	struct proc *p;
1237 	void (*hook)(void);
1238 	int sig;
1239 
1240 	KASSERT(l == curlwp);
1241 	KASSERT(l->l_stat == LSONPROC);
1242 	p = l->l_proc;
1243 
1244 #ifndef __HAVE_FAST_SOFTINTS
1245 	/* Run pending soft interrupts. */
1246 	if (l->l_cpu->ci_data.cpu_softints != 0)
1247 		softint_overlay();
1248 #endif
1249 
1250 #ifdef KERN_SA
1251 	/* Generate UNBLOCKED upcall if needed */
1252 	if (l->l_flag & LW_SA_BLOCKING) {
1253 		sa_unblock_userret(l);
1254 		/* NOTREACHED */
1255 	}
1256 #endif
1257 
1258 	/*
1259 	 * It should be safe to do this read unlocked on a multiprocessor
1260 	 * system.
1261 	 *
1262 	 * LW_SA_UPCALL will be handled after the while() loop, so don't
1263 	 * consider it now.
1264 	 */
1265 	while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
1266 		/*
1267 		 * Process pending signals first, unless the process
1268 		 * is dumping core or exiting, where we will instead
1269 		 * enter the LW_WSUSPEND case below.
1270 		 */
1271 		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1272 		    LW_PENDSIG) {
1273 			mutex_enter(p->p_lock);
1274 			while ((sig = issignal(l)) != 0)
1275 				postsig(sig);
1276 			mutex_exit(p->p_lock);
1277 		}
1278 
1279 		/*
1280 		 * Core-dump or suspend pending.
1281 		 *
1282 		 * In case of core dump, suspend ourselves, so that the
1283 		 * kernel stack and therefore the userland registers saved
1284 		 * in the trapframe are around for coredump() to write them
1285 		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
1286 		 * will write the core file out once all other LWPs are
1287 		 * suspended.
1288 		 */
1289 		if ((l->l_flag & LW_WSUSPEND) != 0) {
1290 			mutex_enter(p->p_lock);
1291 			p->p_nrlwps--;
1292 			cv_broadcast(&p->p_lwpcv);
1293 			lwp_lock(l);
1294 			l->l_stat = LSSUSPENDED;
1295 			lwp_unlock(l);
1296 			mutex_exit(p->p_lock);
1297 			lwp_lock(l);
1298 			mi_switch(l);
1299 		}
1300 
1301 		/* Process is exiting. */
1302 		if ((l->l_flag & LW_WEXIT) != 0) {
1303 			lwp_exit(l);
1304 			KASSERT(0);
1305 			/* NOTREACHED */
1306 		}
1307 
1308 		/* Call userret hook; used by Linux emulation. */
1309 		if ((l->l_flag & LW_WUSERRET) != 0) {
1310 			lwp_lock(l);
1311 			l->l_flag &= ~LW_WUSERRET;
1312 			lwp_unlock(l);
1313 			hook = p->p_userret;
1314 			p->p_userret = NULL;
1315 			(*hook)();
1316 		}
1317 	}
1318 
1319 #ifdef KERN_SA
1320 	/*
1321 	 * Timer events are handled specially.  We only try once to deliver
1322 	 * pending timer upcalls; if it fails, we can try again on the next
1323 	 * loop around.  If we need to re-enter lwp_userret(), MD code will
1324 	 * bounce us back here through the trap path after we return.
1325 	 */
1326 	if (p->p_timerpend)
1327 		timerupcall(l);
1328 	if (l->l_flag & LW_SA_UPCALL)
1329 		sa_upcall_userret(l);
1330 #endif /* KERN_SA */
1331 }
1332 
1333 /*
1334  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1335  */
1336 void
1337 lwp_need_userret(struct lwp *l)
1338 {
1339 	KASSERT(lwp_locked(l, NULL));
1340 
1341 	/*
1342 	 * Since the tests in lwp_userret() are done unlocked, make sure
1343 	 * that the condition will be seen before forcing the LWP to enter
1344 	 * kernel mode.
1345 	 */
1346 	membar_producer();
1347 	cpu_signotify(l);
1348 }
1349 
1350 /*
1351  * Add one reference to an LWP.  This will prevent the LWP from
1352  * exiting, thus keeping the lwp structure and PCB around to inspect.
1353  */
1354 void
1355 lwp_addref(struct lwp *l)
1356 {
1357 
1358 	KASSERT(mutex_owned(l->l_proc->p_lock));
1359 	KASSERT(l->l_stat != LSZOMB);
1360 	KASSERT(l->l_refcnt != 0);
1361 
1362 	l->l_refcnt++;
1363 }
1364 
1365 /*
1366  * Remove one reference to an LWP.  If this is the last reference,
1367  * then we must finalize the LWP's death.
1368  */
1369 void
1370 lwp_delref(struct lwp *l)
1371 {
1372 	struct proc *p = l->l_proc;
1373 
1374 	mutex_enter(p->p_lock);
1375 	KASSERT(l->l_stat != LSZOMB);
1376 	KASSERT(l->l_refcnt > 0);
1377 	if (--l->l_refcnt == 0)
1378 		cv_broadcast(&p->p_lwpcv);
1379 	mutex_exit(p->p_lock);
1380 }
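
/*
 * A sketch of the reference pattern for inspecting another LWP, e.g.
 * from procfs-like code (hypothetical caller):
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	if (l != NULL) {
 *		... inspect l ...
 *		lwp_delref(l);
 *	}
 */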
1381 
1382 /*
1383  * Drain all references to the current LWP.
1384  */
1385 void
1386 lwp_drainrefs(struct lwp *l)
1387 {
1388 	struct proc *p = l->l_proc;
1389 
1390 	KASSERT(mutex_owned(p->p_lock));
1391 	KASSERT(l->l_refcnt != 0);
1392 
1393 	l->l_refcnt--;
1394 	while (l->l_refcnt != 0)
1395 		cv_wait(&p->p_lwpcv, p->p_lock);
1396 }
1397 
1398 /*
1399  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
1400  * be held.
1401  */
1402 bool
1403 lwp_alive(lwp_t *l)
1404 {
1405 
1406 	KASSERT(mutex_owned(l->l_proc->p_lock));
1407 
1408 	switch (l->l_stat) {
1409 	case LSSLEEP:
1410 	case LSRUN:
1411 	case LSONPROC:
1412 	case LSSTOP:
1413 	case LSSUSPENDED:
1414 		return true;
1415 	default:
1416 		return false;
1417 	}
1418 }
1419 
1420 /*
1421  * Return first live LWP in the process.
1422  */
1423 lwp_t *
1424 lwp_find_first(proc_t *p)
1425 {
1426 	lwp_t *l;
1427 
1428 	KASSERT(mutex_owned(p->p_lock));
1429 
1430 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1431 		if (lwp_alive(l)) {
1432 			return l;
1433 		}
1434 	}
1435 
1436 	return NULL;
1437 }
1438 
1439 /*
1440  * lwp_specific_key_create --
1441  *	Create a key for subsystem lwp-specific data.
1442  */
1443 int
1444 lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
1445 {
1446 
1447 	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
1448 }
1449 
1450 /*
1451  * lwp_specific_key_delete --
1452  *	Delete a key for subsystem lwp-specific data.
1453  */
1454 void
1455 lwp_specific_key_delete(specificdata_key_t key)
1456 {
1457 
1458 	specificdata_key_delete(lwp_specificdata_domain, key);
1459 }
1460 
1461 /*
1462  * lwp_initspecific --
1463  *	Initialize an LWP's specificdata container.
1464  */
1465 void
1466 lwp_initspecific(struct lwp *l)
1467 {
1468 	int error;
1469 
1470 	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
1471 	KASSERT(error == 0);
1472 }
1473 
1474 /*
1475  * lwp_finispecific --
1476  *	Finalize an LWP's specificdata container.
1477  */
1478 void
1479 lwp_finispecific(struct lwp *l)
1480 {
1481 
1482 	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
1483 }
1484 
1485 /*
1486  * lwp_getspecific --
1487  *	Return lwp-specific data corresponding to the specified key.
1488  *
1489  *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
1490  *	only its OWN SPECIFIC DATA.  If it is necessary to access another
1491  *	LWP's specific data, care must be taken to ensure that doing so
1492  *	would not cause internal data structure inconsistency (i.e. caller
1493  *	can guarantee that the target LWP is not inside an lwp_getspecific()
1494  *	or lwp_setspecific() call).
1495  */
1496 void *
1497 lwp_getspecific(specificdata_key_t key)
1498 {
1499 
1500 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1501 						  &curlwp->l_specdataref, key));
1502 }
1503 
1504 void *
1505 _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
1506 {
1507 
1508 	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1509 						  &l->l_specdataref, key));
1510 }
1511 
1512 /*
1513  * lwp_setspecific --
1514  *	Set lwp-specific data corresponding to the specified key.
1515  */
1516 void
1517 lwp_setspecific(specificdata_key_t key, void *data)
1518 {
1519 
1520 	specificdata_setspecific(lwp_specificdata_domain,
1521 				 &curlwp->l_specdataref, key, data);
1522 }
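
/*
 * A minimal sketch of a subsystem using lwp-specific data (the key,
 * destructor and payload names are hypothetical):
 *
 *	static specificdata_key_t foo_lwp_key;
 *
 *	error = lwp_specific_key_create(&foo_lwp_key, foo_dtor);
 *	...
 *	lwp_setspecific(foo_lwp_key, data);	(current LWP only)
 *	data = lwp_getspecific(foo_lwp_key);
 */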
1523 
1524 /*
1525  * Allocate a new lwpctl structure for a user LWP.
1526  */
1527 int
1528 lwp_ctl_alloc(vaddr_t *uaddr)
1529 {
1530 	lcproc_t *lp;
1531 	u_int bit, i, offset;
1532 	struct uvm_object *uao;
1533 	int error;
1534 	lcpage_t *lcp;
1535 	proc_t *p;
1536 	lwp_t *l;
1537 
1538 	l = curlwp;
1539 	p = l->l_proc;
1540 
1541 	if (l->l_lcpage != NULL) {
1542 		lcp = l->l_lcpage;
1543 		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1544 		return (EINVAL);
1545 	}
1546 
1547 	/* First time around, allocate header structure for the process. */
1548 	if ((lp = p->p_lwpctl) == NULL) {
1549 		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1550 		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1551 		lp->lp_uao = NULL;
1552 		TAILQ_INIT(&lp->lp_pages);
1553 		mutex_enter(p->p_lock);
1554 		if (p->p_lwpctl == NULL) {
1555 			p->p_lwpctl = lp;
1556 			mutex_exit(p->p_lock);
1557 		} else {
1558 			mutex_exit(p->p_lock);
1559 			mutex_destroy(&lp->lp_lock);
1560 			kmem_free(lp, sizeof(*lp));
1561 			lp = p->p_lwpctl;
1562 		}
1563 	}
1564 
1565 	/*
1566 	 * Set up an anonymous memory region to hold the shared pages.
1567 	 * Map them into the process' address space.  The user vmspace
1568 	 * gets the first reference on the UAO.
1569 	 */
1570 	mutex_enter(&lp->lp_lock);
1571 	if (lp->lp_uao == NULL) {
1572 		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1573 		lp->lp_cur = 0;
1574 		lp->lp_max = LWPCTL_UAREA_SZ;
1575 		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1576 		     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
1577 		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1578 		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1579 		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1580 		if (error != 0) {
1581 			uao_detach(lp->lp_uao);
1582 			lp->lp_uao = NULL;
1583 			mutex_exit(&lp->lp_lock);
1584 			return error;
1585 		}
1586 	}
1587 
1588 	/* Get a free block and allocate for this LWP. */
1589 	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1590 		if (lcp->lcp_nfree != 0)
1591 			break;
1592 	}
1593 	if (lcp == NULL) {
1594 		/* Nothing available - try to set up a free page. */
1595 		if (lp->lp_cur == lp->lp_max) {
1596 			mutex_exit(&lp->lp_lock);
1597 			return ENOMEM;
1598 		}
1599 		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1600 		if (lcp == NULL) {
1601 			mutex_exit(&lp->lp_lock);
1602 			return ENOMEM;
1603 		}
1604 		/*
1605 		 * Wire the next page down in kernel space.  Since this
1606 		 * is a new mapping, we must add a reference.
1607 		 */
1608 		uao = lp->lp_uao;
1609 		(*uao->pgops->pgo_reference)(uao);
1610 		lcp->lcp_kaddr = vm_map_min(kernel_map);
1611 		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1612 		    uao, lp->lp_cur, PAGE_SIZE,
1613 		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1614 		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1615 		if (error != 0) {
1616 			mutex_exit(&lp->lp_lock);
1617 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1618 			(*uao->pgops->pgo_detach)(uao);
1619 			return error;
1620 		}
1621 		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1622 		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1623 		if (error != 0) {
1624 			mutex_exit(&lp->lp_lock);
1625 			uvm_unmap(kernel_map, lcp->lcp_kaddr,
1626 			    lcp->lcp_kaddr + PAGE_SIZE);
1627 			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1628 			return error;
1629 		}
1630 		/* Prepare the page descriptor and link into the list. */
1631 		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1632 		lp->lp_cur += PAGE_SIZE;
1633 		lcp->lcp_nfree = LWPCTL_PER_PAGE;
1634 		lcp->lcp_rotor = 0;
1635 		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1636 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1637 	}
1638 	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1639 		if (++i >= LWPCTL_BITMAP_ENTRIES)
1640 			i = 0;
1641 	}
1642 	bit = ffs(lcp->lcp_bitmap[i]) - 1;
1643 	lcp->lcp_bitmap[i] ^= (1 << bit);
1644 	lcp->lcp_rotor = i;
1645 	lcp->lcp_nfree--;
1646 	l->l_lcpage = lcp;
1647 	offset = (i << 5) + bit;
1648 	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1649 	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1650 	mutex_exit(&lp->lp_lock);
1651 
1652 	KPREEMPT_DISABLE(l);
1653 	l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1654 	KPREEMPT_ENABLE(l);
1655 
1656 	return 0;
1657 }
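
/*
 * The bitmap arithmetic above and in lwp_ctl_free() packs 32 lwpctl
 * slots into each bitmap word; a sketch of the index math, assuming
 * 32-bit bitmap entries:
 *
 *	offset = (word << 5) + bit;		(allocation, above)
 *	word = offset >> 5;			(free)
 *	lcp_bitmap[word] |= 1 << (offset & 31);	(mark slot free again)
 */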
1658 
1659 /*
1660  * Free an lwpctl structure back to the per-process list.
1661  */
1662 void
1663 lwp_ctl_free(lwp_t *l)
1664 {
1665 	lcproc_t *lp;
1666 	lcpage_t *lcp;
1667 	u_int map, offset;
1668 
1669 	lp = l->l_proc->p_lwpctl;
1670 	KASSERT(lp != NULL);
1671 
1672 	lcp = l->l_lcpage;
1673 	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1674 	KASSERT(offset < LWPCTL_PER_PAGE);
1675 
1676 	mutex_enter(&lp->lp_lock);
1677 	lcp->lcp_nfree++;
1678 	map = offset >> 5;
1679 	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
1680 	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1681 		lcp->lcp_rotor = map;
1682 	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1683 		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1684 		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1685 	}
1686 	mutex_exit(&lp->lp_lock);
1687 }
1688 
1689 /*
1690  * Process is exiting; tear down lwpctl state.  This can only be safely
1691  * called by the last LWP in the process.
1692  */
1693 void
1694 lwp_ctl_exit(void)
1695 {
1696 	lcpage_t *lcp, *next;
1697 	lcproc_t *lp;
1698 	proc_t *p;
1699 	lwp_t *l;
1700 
1701 	l = curlwp;
1702 	l->l_lwpctl = NULL;
1703 	l->l_lcpage = NULL;
1704 	p = l->l_proc;
1705 	lp = p->p_lwpctl;
1706 
1707 	KASSERT(lp != NULL);
1708 	KASSERT(p->p_nlwps == 1);
1709 
1710 	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
1711 		next = TAILQ_NEXT(lcp, lcp_chain);
1712 		uvm_unmap(kernel_map, lcp->lcp_kaddr,
1713 		    lcp->lcp_kaddr + PAGE_SIZE);
1714 		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1715 	}
1716 
1717 	if (lp->lp_uao != NULL) {
1718 		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
1719 		    lp->lp_uva + LWPCTL_UAREA_SZ);
1720 	}
1721 
1722 	mutex_destroy(&lp->lp_lock);
1723 	kmem_free(lp, sizeof(*lp));
1724 	p->p_lwpctl = NULL;
1725 }
1726 
1727 /*
1728  * Return the current LWP's "preemption counter".  Used to detect
1729  * preemption across operations that can tolerate preemption without
1730  * crashing, but which may generate incorrect results if preempted.
1731  */
1732 uint64_t
1733 lwp_pctr(void)
1734 {
1735 
1736 	return curlwp->l_ncsw;
1737 }
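
/*
 * Example use of lwp_pctr() (a sketch): retry an operation if the LWP
 * was preempted, and hence possibly migrated, partway through.
 *
 *	do {
 *		pctr = lwp_pctr();
 *		... compute using per-CPU state ...
 *	} while (pctr != lwp_pctr());
 */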
1738 
1739 #if defined(DDB)
1740 void
1741 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1742 {
1743 	lwp_t *l;
1744 
1745 	LIST_FOREACH(l, &alllwp, l_list) {
1746 		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
1747 
1748 		if (addr < stack || stack + KSTACK_SIZE <= addr) {
1749 			continue;
1750 		}
1751 		(*pr)("%p is %p+%zu, LWP %p's stack\n",
1752 		    (void *)addr, (void *)stack,
1753 		    (size_t)(addr - stack), l);
1754 	}
1755 }
1756 #endif /* defined(DDB) */
1757