/*	$NetBSD: sys_lwp.c,v 1.40 2008/04/28 20:24:04 martin Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.40 2008/04/28 20:24:04 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>

#include <uvm/uvm_extern.h>

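/*
 * Maximum number of targets that a single _lwp_unpark_all() call will
 * handle; callers with larger target sets must loop over them in blocks.
 */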
#define	LWP_UNPARK_MAX		1024

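/*
 * Sync object for LWPs sleeping in _lwp_park().  The sleep queue is
 * kept in LIFO order (most recently parked LWP first).
 */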
syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

sleeptab_t	lwp_park_tab;

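/*
 * One-time initialisation of the sleep table used by the park/unpark
 * system calls.
 */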
void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

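/*
 * Create a new LWP running the given user context, and copy the new
 * LWP's ID out to new_lwp.  The LWP is created detached and/or
 * suspended as requested by the flags.
 */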
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap, register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

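/*
 * Cause the calling LWP to exit.
 */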
int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

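/*
 * Return the LWP ID of the calling LWP.
 */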
int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

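/*
 * Return the calling LWP's private data pointer.
 */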
int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

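/*
 * Set the calling LWP's private data pointer.
 */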
int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
	return 0;
}

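/*
 * Suspend the target LWP, then wait until it reaches a stable state:
 * suspended, running again with the suspension cancelled, or exited.
 * Self-suspension of the last running LWP fails with EDEADLK.
 */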
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * decremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

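/*
 * Make a suspended LWP in the calling process runnable again.
 */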
int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

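/*
 * Wake the target LWP from an interruptible sleep: ENODEV if it is
 * not sleeping, EBUSY if its sleep cannot be interrupted.
 */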
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		(void)lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

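/*
 * Wait for an LWP in the calling process to exit, and report the ID
 * of the departed LWP via the departed pointer if one is supplied.
 */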
int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

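/*
 * Send a signal to a specific LWP within the calling process.  A
 * signo of zero performs only the validity checks.
 */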
/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

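/*
 * Detach the target LWP so that no other LWP need wait for it to
 * exit; if it has already exited, clean up after it immediately.
 */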
int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

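/*
 * Derive the sleep queue wait channel for a park operation from the
 * owning process and the user-supplied hint, so that equal hints in
 * different processes yield distinct wait channels.
 */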
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

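/*
 * Unpark the given LWP in the current process.  If the LWP is found
 * parked on the sleep queue it is set running again; otherwise the
 * wakeup is recorded as pending so that its next _lwp_park() returns
 * early.
 */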
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	int swapin;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		swapin = sleepq_remove(sq, t);
		sleepq_unlock(sq);
		if (swapin)
			uvm_kick_scheduler();
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	sleepq_unlock(sq);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		(void)lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

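/*
 * Park the calling LWP on the sleep queue derived from the hint,
 * until it is unparked, cancelled, or the absolute timeout given by
 * ts expires.  A pending unpark causes an immediate EALREADY return.
 */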
int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		sleepq_unlock(sq);
		return EALREADY;
	}
	lwp_unlock_to(l, sq->sq_mutex);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys__lwp_park(struct lwp *l, const struct sys__lwp_park_args *uap, register_t *retval)
{
	/* {
		syscallarg(const struct timespec *)	ts;
		syscallarg(lwpid_t)			unpark;
		syscallarg(const void *)		hint;
		syscallarg(const void *)		unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

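/*
 * Unpark a single target LWP (see lwp_unpark() above).
 */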
int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

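/*
 * Unpark a batch of target LWPs (see lwp_unpark()).  If the target
 * array is NULL, report the per-call limit (LWP_UNPARK_MAX) so the
 * caller can split larger requests into blocks.
 */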
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap, register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			sleepq_lock(sq);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			(void)lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		sleepq_lock(sq);
	}

	sleepq_unlock(sq);
	if (tp != targets)
		kmem_free(tp, sz);
	if (swapin)
		uvm_kick_scheduler();

	return 0;
}

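/*
 * Set the name of the target LWP (0 means the calling LWP).  Names
 * are silently truncated to MAXCOMLEN-1 characters.
 */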
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

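/*
 * Fetch the name of the target LWP (0 means the calling LWP).  An
 * unnamed LWP yields the empty string.
 */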
int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

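/*
 * Allocate the per-LWP "lwpctl" communication area and copy its
 * user-space address out to the caller.  Only the CURCPU and PCTR
 * features are recognised; any other feature bit yields ENODEV.
 */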
int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
827