/*	$NetBSD: kern_lwp.c,v 1.12 2003/07/28 23:35:20 matt Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.12 2003/07/28 23:35:20 matt Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;
struct lwplist deadlwp;
struct lwplist zomblwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif
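
/*
 * _lwp_create(2): create a new LWP in the calling process.  The new
 * LWP's initial register state is taken from the ucontext supplied
 * by the caller, and its ID is copied out through new_lwp.  Unless
 * LWP_SUSPENDED is given, the new LWP is made runnable immediately.
 *
 * Rough userland usage (a sketch, not taken from this file):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	getcontext(&uc);
 *	... point uc at a fresh stack and entry function,
 *	    e.g. with _lwp_makecontext(3) ...
 *	_lwp_create(&uc, 0, &lid);
 */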
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
	if (error) {
		pool_put(&lwp_uc_pool, newuc);	/* don't leak the ucontext */
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);	/* don't leak the ucontext */
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		SCHED_UNLOCK(s);
		simple_lock(&p->p_lwplock);
		p->p_nrlwps++;
		simple_unlock(&p->p_lwplock);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error)
		return (error);

	return (0);
}
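
/*
 * _lwp_exit(2): terminate the calling LWP.  lwp_exit() does not
 * return; if this is the last live LWP in the process, the whole
 * process exits.
 */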
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}
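
/*
 * _lwp_self(2): return the calling LWP's ID.
 */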
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}
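
/*
 * _lwp_getprivate(2): return the calling LWP's private data pointer,
 * as set by _lwp_setprivate(2).
 */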
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t) l->l_private;

	return (0);
}
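
/*
 * _lwp_setprivate(2): store a per-LWP private data pointer, typically
 * used by a thread library for thread-specific state.
 */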
int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);

	return (0);
}
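
/*
 * _lwp_suspend(2): stop execution of the target LWP until a matching
 * _lwp_continue(2).  A self-suspend that would leave every LWP in the
 * process suspended fails with EDEADLK instead of deadlocking.
 */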
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t, *t2;
	int s;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);

		SCHED_LOCK(s);
		l->l_stat = LSSUSPENDED;
		/* XXX NJWLWP check if this makes sense here: */
		l->l_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			SCHED_UNLOCK(s);
			simple_lock(&p->p_lwplock);
			p->p_nrlwps--;
			simple_unlock(&p->p_lwplock);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSDEAD:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does..... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			panic("XXX multiprocessor LWPs? Implement me!");
			break;
		}
	}

	return (0);
}
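
/*
 * _lwp_continue(2): resume execution of a suspended LWP.
 */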
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	lwp_continue(t);

	return (0);
}
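
/*
 * Make a suspended LWP runnable again, or put it back to sleep if it
 * was sleeping when it was suspended.  A no-op if the LWP is not
 * suspended.
 */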
void
lwp_continue(struct lwp *l)
{
	int s;

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == NULL) {
		/* LWP was runnable before being suspended. */
		SCHED_LOCK(s);
		setrunnable(l);
		SCHED_UNLOCK(s);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}
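
/*
 * _lwp_wakeup(2): cut short the target LWP's interruptible sleep.
 * Fails with ENODEV if the LWP is not sleeping, and with EBUSY if its
 * sleep is not interruptible.
 */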
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int error;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	SCHED_LOCK(s);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL) {
		error = ESRCH;
		goto exit;
	}

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto exit;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto exit;
	}
	/*
	 * Tell ltsleep to wake up.
	 */
	t->l_flag |= L_CANCELLED;

	setrunnable(t);
	error = 0;
exit:
	SCHED_UNLOCK(s);

	return error;
}
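
/*
 * _lwp_wait(2): wait for the LWP named by wait_for (or, if wait_for
 * is 0, for any undetached LWP in the process) to exit, optionally
 * copying the departed LWP's ID out through departed.
 */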
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}
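
/*
 * Shared guts of _lwp_wait(2): sleep on &p->p_nlwps until a matching
 * LWP turns up as a zombie, then reap it and report its ID.  Detached
 * LWPs and the caller itself are never matched, and EDEADLK is
 * returned for waits that could never be satisfied.
 */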
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, s, wpri;
	static char waitstr1[] = "lwpwait";
	static char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			s = proclist_lock_write();
			LIST_REMOVE(l2, l_zlist); /* off zomblwp */
			proclist_unlock_write(s);

			simple_lock(&p->p_lwplock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lwplock);
			/* XXX decrement limits */

			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		           l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock check: if all other LWPs are
			 * waiting for exits or suspended, we would
			 * deadlock.
			 */
			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
					l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/*
			 * XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the LWP that is
			 * being waited for.  wchan is already filled with
			 * &p->p_nlwps, and putting the LWP address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later.  Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}
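
/*
 * Create a new LWP in process p2, modeled on l1 and using the given
 * u-area.  The new LWP is created in state LSIDL; making it runnable
 * is the caller's job.  If rnewlwpp is non-NULL, a pointer to the new
 * LWP is returned through it.
 */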
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	memset(&l2->l_startzero, 0,
	       (unsigned) ((caddr_t)&l2->l_endzero -
			   (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	       (unsigned) ((caddr_t)&l2->l_endcopy -
			   (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = (struct user *)uaddr;
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lwplock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lwplock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	return (0);
}

/*
 * Exit the calling LWP.  This will call cpu_exit, which will call
 * cpu_switch, so this can only be used meaningfully if you're willing
 * to switch away.  Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on).  We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	if ((l->l_flag & L_DETACHED) == 0) {
		DPRINTF(("lwp_exit: %d.%d going on zombie list\n", p->p_pid,
		    l->l_lid));
		LIST_INSERT_HEAD(&zomblwp, l, l_zlist);
	}
	proclist_unlock_write(s);

	simple_lock(&p->p_lwplock);
	p->p_nrlwps--;
	simple_unlock(&p->p_lwplock);

	l->l_stat = LSDEAD;

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l, 0);
}
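
/*
 * Called once an exiting LWP has been switched away from for the last
 * time: put it on the deadlwp list and wake the reaper, which frees
 * the remaining resources.
 */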
void
lwp_exit2(struct lwp *l)
{

	simple_lock(&deadproc_slock);
	LIST_INSERT_HEAD(&deadlwp, l, l_list);
	simple_unlock(&deadproc_slock);

	wakeup(&deadprocs);
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SDEAD:
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find an LWP for process"
		" %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}
658