/*	$NetBSD: kern_lwp.c,v 1.49 2006/11/03 19:46:03 ad Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.49 2006/11/03 19:46:03 ad Exp $");

#include "opt_multiprocessor.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

POOL_INIT(lwp_pool, sizeof(struct lwp), 0, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

static specificdata_domain_t lwp_specificdata_domain;

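/* List of all LWPs in the system. */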
struct lwplist alllwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) do { if (lwp_debug) printf x; } while (/* CONSTCOND */ 0)
#else
#define DPRINTF(x)
#endif

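/*
 * lwpinit: one-time initialization of the LWP subsystem; creates the
 * specificdata domain used for per-LWP data.
 */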
void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
}

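/*
 * sys__lwp_create: create a new LWP in the calling process from a
 * user-supplied ucontext, implementing the _lwp_create(2) system call.
 * Refused (EINVAL) for processes using scheduler activations (P_SA).
 *
 * A minimal userland usage sketch (hypothetical start routine, argument
 * and stack; not code from this file):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	_lwp_makecontext(&uc, start_fn, arg, NULL, stack, stacksize);
 *	if (_lwp_create(&uc, 0, &lid) != 0)
 *		err(1, "_lwp_create");
 */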
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	if (p->p_flag & P_SA)
		return EINVAL;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc,
	    l->l_proc->p_emul->e_sa->sae_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		p->p_nrlwps++;
		SCHED_UNLOCK(s);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error) {
		/* XXX We should destroy the LWP. */
		return (error);
	}

	return (0);
}


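/*
 * sys__lwp_exit: terminate the calling LWP, implementing the
 * _lwp_exit(2) system call.  Does not return.
 */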
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


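/*
 * sys__lwp_self: return the calling LWP's ID, implementing the
 * _lwp_self(2) system call.
 */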
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


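/*
 * sys__lwp_getprivate: return the calling LWP's private data pointer,
 * implementing the _lwp_getprivate(2) system call.
 */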
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t) l->l_private;

	return (0);
}


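/*
 * sys__lwp_setprivate: set the calling LWP's private data pointer,
 * implementing the _lwp_setprivate(2) system call.
 */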
int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);

	return (0);
}


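/*
 * sys__lwp_suspend: suspend a target LWP in the calling process,
 * implementing the _lwp_suspend(2) system call.  Fails with EDEADLK
 * if suspending the caller would leave no LWP in the process runnable.
 */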
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;
	struct lwp *t2;

	if (p->p_flag & P_SA)
		return EINVAL;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);
	}

	return lwp_suspend(l, t);
}

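/*
 * lwp_suspend: in-kernel helper that suspends the target LWP t on
 * behalf of LWP l, handling each scheduler state.  Takes the scheduler
 * lock itself where needed; the caller must not hold it.
 */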
inline int
lwp_suspend(struct lwp *l, struct lwp *t)
{
	struct proc *p = t->l_proc;
	int s;

	if (t == l) {
		SCHED_LOCK(s);
		KASSERT(l->l_stat == LSONPROC);
		l->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		/* XXX NJWLWP check if this makes sense here: */
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			p->p_nrlwps--;
			SCHED_UNLOCK(s);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does..... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			/* XXX multiprocessor LWPs? Implement me! */
			return (EINVAL);
		}
	}

	return (0);
}


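/*
 * sys__lwp_continue: resume a suspended LWP in the calling process,
 * implementing the _lwp_continue(2) system call.
 */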
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int s, target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	if (p->p_flag & P_SA)
		return EINVAL;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	SCHED_LOCK(s);
	lwp_continue(t);
	SCHED_UNLOCK(s);

	return (0);
}

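/*
 * lwp_continue: make a suspended LWP runnable again, or put it back to
 * sleep if it was sleeping when suspended.  Must be called with the
 * scheduler lock held.
 */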
void
lwp_continue(struct lwp *l)
{

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == 0) {
		/* LWP was runnable before being suspended. */
		setrunnable(l);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}

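/*
 * sys__lwp_wakeup: wake a sleeping LWP in the calling process,
 * implementing the _lwp_wakeup(2) system call.  The target must be in
 * an interruptible sleep (L_SINTR); it is flagged L_CANCELLED so that
 * ltsleep() can see the wakeup was a cancellation.
 */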
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int error;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	SCHED_LOCK(s);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL) {
		error = ESRCH;
		goto exit;
	}

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto exit;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto exit;
	}
	/*
	 * Tell ltsleep to wake up.
	 */
	t->l_flag |= L_CANCELLED;

	setrunnable(t);
	error = 0;
exit:
	SCHED_UNLOCK(s);

	return error;
}

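/*
 * sys__lwp_wait: wait for an LWP in the calling process to exit,
 * implementing the _lwp_wait(2) system call.  A wait_for of 0 waits
 * for any undetached LWP.
 */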
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}


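/*
 * lwp_wait1: in-kernel helper that waits for an undetached LWP (lid,
 * or any LWP of the process if lid == 0) to become a zombie, then
 * reaps it.  The departed LWP's ID is returned through *departed if
 * that is non-NULL.  LWPWAIT_EXITCONTROL in flags selects PNOEXITERR
 * instead of PCATCH for the sleep, for use during process exit.
 */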
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, wpri;
	static const char waitstr1[] = "lwpwait";
	static const char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			simple_lock(&p->p_lock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		           l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock checks.
			 * 1. If all other LWPs are waiting for exits
			 *    or are suspended, we would deadlock.
			 */

			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
					l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX We'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the LWP that is
			 * being waited for.  wchan is already filled with
			 * &p->p_nlwps, and putting the lwp address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later.  Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


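/*
 * newlwp: create a new LWP l2 in process p2, inheriting what it needs
 * from l1.  uaddr/inmem describe the new u-area; stack/stacksize give
 * an optional alternate user stack.  The new LWP will start life in
 * func with argument arg (or a pointer to itself if arg is NULL).  It
 * is returned through *rnewlwpp and left in state LSIDL; the caller is
 * responsible for making it runnable.
 */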
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	lwp_initspecific(l2);

	memset(&l2->l_startzero, 0,
	       (unsigned) ((caddr_t)&l2->l_endzero -
			   (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	       (unsigned) ((caddr_t)&l2->l_endcopy -
			   (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all LWPs will always run
	 * on the same CPU.  So, initialize the child's CPU to the
	 * parent's now.  In the multiprocessor case, the child's
	 * CPU will be initialized in the low-level context switch
	 * code when the LWP runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}


/*
 * Exit the calling LWP (and, if it is the last live one, the whole
 * process).  This will call cpu_exit, which will call cpu_switch, so
 * this can only be used meaningfully if you're willing to switch away.
 * Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on). We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}

	/* Delete the specificdata while it's still safe to sleep. */
	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	proclist_unlock_write(s);

	/*
	 * Release our cached credentials, and collate accounting flags.
	 */
	kauth_cred_free(l->l_cred);
	simple_lock(&p->p_lock);
	p->p_acflag |= l->l_acflag;
	simple_unlock(&p->p_lock);

	/* Free MD LWP resources */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	pmap_deactivate(l);

	if (l->l_flag & L_DETACHED) {
		simple_lock(&p->p_lock);
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		simple_unlock(&p->p_lock);

		curlwp = NULL;
		l->l_proc = NULL;
	}

	SCHED_LOCK(s);
	p->p_nrlwps--;
	l->l_stat = LSDEAD;
	SCHED_UNLOCK(s);

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{
	struct proc *p;

	KERNEL_LOCK(LK_EXCLUSIVE);
	/*
	 * Free the VM resources we're still holding on to.
	 */
	uvm_lwp_exit(l);

	if (l->l_flag & L_DETACHED) {
		/* Nobody waits for detached LWPs. */
		pool_put(&lwp_pool, l);
		KERNEL_UNLOCK();
	} else {
		l->l_stat = LSZOMB;
		p = l->l_proc;
		p->p_nzlwps++;
		wakeup(&p->p_nlwps);
		KERNEL_UNLOCK();
	}
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (signalled)
			return signalled;
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nrlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find a lwp for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	simple_lock(&p->p_lock);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	simple_unlock(&p->p_lock);
	if (oc != NULL)
		kauth_cred_free(oc);
}

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

	specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
						  &curlwp->l_specdataref, key));
}

void *
_lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
						  &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
				 &curlwp->l_specdataref, key, data);
}
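
/*
 * Typical subsystem use of the lwp-specific data API (a sketch with
 * hypothetical names, not code from this file):
 *
 *	static specificdata_key_t foo_key;
 *
 *	// At subsystem initialization, with foo_dtor freeing one datum:
 *	if (lwp_specific_key_create(&foo_key, foo_dtor) != 0)
 *		panic("foo: can't create lwp-specific data key");
 *
 *	// Later, from the owning LWP only:
 *	lwp_setspecific(foo_key, data);
 *	data = lwp_getspecific(foo_key);
 */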
855