/*	$NetBSD: sys_lwp.c,v 1.41 2008/05/26 12:08:39 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.41 2008/05/26 12:08:39 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

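/*
 * Sync object for LWPs sleeping in _lwp_park().  SOBJ_SLEEPQ_LIFO gives
 * last-in, first-out queueing, which favours the most recently parked
 * (and so likely still cache-warm) LWP when unparking.
 */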
syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

sleeptab_t	lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap, register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
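
/*
 * Illustrative userland sketch (not kernel code), assuming the libc
 * wrappers _lwp_makecontext(3) and _lwp_create(2); start_fn, arg and
 * the stack are hypothetical and error handling is abbreviated:
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *	static char stack[65536];
 *
 *	_lwp_makecontext(&uc, start_fn, arg, NULL, stack, sizeof(stack));
 *	if (_lwp_create(&uc, 0, &lid) != 0)
 *		err(1, "_lwp_create");
 */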

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
	return 0;
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
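	/* lwp_continue() sets the LWP running again and releases the LWP lock. */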
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

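/*
 * Illustrative userland sketch (not kernel code): suspending and resuming
 * a sibling LWP with the _lwp_suspend(2)/_lwp_continue(2) wrappers; "lid"
 * is the target's LWP ID and the middle call is hypothetical:
 *
 *	if (_lwp_suspend(lid) != 0)
 *		err(1, "_lwp_suspend");
 *	do_something_while_target_is_stopped();
 *	if (_lwp_continue(lid) != 0)
 *		err(1, "_lwp_continue");
 */
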
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		(void)lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

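/*
 * Illustrative userland sketch (not kernel code): a join-style wait using
 * the _lwp_wait(2) wrapper.  Passing 0 waits for any undetached LWP to
 * exit; "departed" receives its ID:
 *
 *	lwpid_t departed;
 *
 *	if (_lwp_wait(0, &departed) == 0)
 *		printf("LWP %d exited\n", departed);
 */
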
/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

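/*
 * Compute the wait channel for a park operation: XOR the process pointer
 * with the user-supplied hint (typically the address of the user-level
 * sync object), which spreads parked LWPs across the sleep table buckets.
 */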
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

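/*
 * Unpark the target LWP.  If it has not yet parked, mark the operation
 * as pending so that its next call to _lwp_park() returns immediately.
 */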
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	int swapin;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		swapin = sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		if (swapin)
			uvm_kick_scheduler();
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		(void)lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

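/*
 * Park the calling LWP on the given wait channel until it is unparked,
 * the (absolute) timeout expires, or a signal arrives.  Returns EALREADY
 * without blocking if an unpark or wakeup is already pending.
 */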
int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys__lwp_park(struct lwp *l, const struct sys__lwp_park_args *uap, register_t *retval)
{
	/* {
		syscallarg(const struct timespec *)	ts;
		syscallarg(lwpid_t)			unpark;
		syscallarg(const void *)		hint;
		syscallarg(const void *)		unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

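/*
 * Illustrative userland sketch (not kernel code): a wait loop built on
 * the _lwp_park(2)/_lwp_unpark(2) wrappers.  "obj", "lid" and
 * condition_is_true() are hypothetical; EALREADY and EINTR simply mean
 * "re-check the condition and retry":
 *
 *	while (!condition_is_true(obj))
 *		(void)_lwp_park(NULL, 0, obj, NULL);
 *
 * and on the waking side, after making the condition true:
 *
 *	_lwp_unpark(lid, obj);
 */
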
int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap, register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			(void)lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);
	if (swapin)
		uvm_kick_scheduler();

	return 0;
}

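/*
 * Illustrative userland sketch (not kernel code): callers first probe
 * with a NULL target array to learn the per-call limit, then unpark in
 * blocks of at most that size.  "waiters", "nwaiters" and "obj" are
 * hypothetical:
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *
 *	for (size_t i = 0; i < nwaiters; i += max)
 *		_lwp_unpark_all(waiters + i,
 *		    MIN((size_t)max, nwaiters - i), obj);
 */
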
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

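/*
 * Illustrative userland sketch (not kernel code): naming the calling LWP
 * and reading the name back with the _lwp_setname(2)/_lwp_getname(2)
 * wrappers; a target of 0 means the calling LWP:
 *
 *	char buf[MAXCOMLEN];
 *
 *	_lwp_setname(0, "worker");
 *	if (_lwp_getname(0, buf, sizeof(buf)) == 0)
 *		printf("name: %s\n", buf);
 */
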
int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
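
/*
 * Illustrative userland sketch (not kernel code): mapping the per-LWP
 * lwpctl block and reading the current CPU, assuming the _lwp_ctl(2)
 * wrapper and the lc_curcpu field from <sys/lwpctl.h>:
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) != 0)
 *		err(1, "_lwp_ctl");
 *	printf("running on CPU %d\n", lc->lc_curcpu);
 */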
830