/*	$NetBSD: sys_lwp.c,v 1.70 2019/09/30 21:13:33 kamil Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.70 2019/09/30 21:13:33 kamil Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/ptrace.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024
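
/*
 * Illustrative sketch (not part of this file): a userland caller can
 * discover the limit by passing a NULL target array to _lwp_unpark_all()
 * and then unpark in blocks, assuming the usual libc stub signature
 * ssize_t _lwp_unpark_all(const lwpid_t *, size_t, const void *):
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *	for (size_t i = 0; i < n; i += (size_t)max)
 *		_lwp_unpark_all(&lids[i], MIN(n - i, (size_t)max), hint);
 *
 * Here n, lids and hint are hypothetical caller-side variables.
 */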

static const stack_t lwp_ss_init = SS_INIT;

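/*
 * Sync object for parked LWPs.  SOBJ_SLEEPQ_LIFO queues sleepers in
 * LIFO order, presumably so that the most recently parked (and most
 * likely cache-warm) LWP is found and woken first.
 */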
static syncobj_t lwp_park_sobj = {
	.sobj_flag	= SOBJ_SLEEPQ_LIFO,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

static sleeptab_t	lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

static void
mi_startlwp(void *arg)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	(p->p_emul->e_startlwp)(arg);

	/* If the process is traced, report LWP creation to the debugger. */
	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
	    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
		/* Paranoid check */
		mutex_enter(proc_lock);
		if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
		    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
			mutex_exit(proc_lock);
			return;
		}

		mutex_enter(p->p_lock);
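		/* eventswitch() reports the event and releases both locks. */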
		eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
	}
}

int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp,
    const sigset_t *sigmask, const stack_t *sigstk)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

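	/*
	 * Note: the sigstk argument is accepted but currently unused;
	 * the new LWP always starts with lwp_ss_init (SS_INIT) as its
	 * signal stack state.
	 */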
	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
	    mi_startlwp, arg, &l2, l->l_class, sigmask, &lwp_ss_init);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc;
	lwpid_t lid;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
	    &newuc->uc_sigmask : &l->l_sigmask;
	error = do_lwp_create(l, newuc, SCARG(uap, flags), &lid, sigmask,
	    &SS_INIT);
	if (error)
		goto fail;

	/*
	 * Do not free the ucontext if the copyout fails: the new LWP is
	 * already running and will access it.
	 */
	return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));

fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * decremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

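/*
 * Compute the wait channel for a park operation by mixing the process
 * pointer with the user-supplied hint.  Collisions between different
 * processes or hints are harmless: the sleep queue walks below match
 * on both l_proc and l_lid before acting on an LWP.
 */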
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set it running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it may be
	 * parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

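/*
 * Park the calling LWP on the wait channel derived from hint.  Returns
 * zero when unparked normally, EALREADY if an unpark was already
 * pending, ETIMEDOUT if the optional timeout expired, and EINTR if the
 * sleep was interrupted.  For relative timeouts, *ts is updated with
 * the time remaining so that callers such as sys____lwp_park60() can
 * copy it back out to userspace.
 */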
int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	struct timespec start;
	lwp_t *l;
	bool timeremain = !(flags & TIMER_ABSTIME) && ts;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo,
		    timeremain ? &start : NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		if (timeremain)
			memset(ts, 0, sizeof(*ts));
		break;
	case ERESTART:
		error = EINTR;
		/*FALLTHROUGH*/
	default:
		if (timeremain)
			clock_timeleft(clock_id, ts, &start);
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t)			clock_id;
		syscallarg(int)				flags;
		syscallarg(struct timespec *)		ts;
		syscallarg(lwpid_t)			unpark;
		syscallarg(const void *)		hint;
		syscallarg(const void *)		unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp,
	    SCARG(uap, hint));
	if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
		(void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
	return error;
}

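/*
 * Illustrative (hypothetical) userland pairing, assuming libc stubs
 * that match the syscall argument lists above: the waiter parks on the
 * address of a shared object and the waker passes the same address as
 * the hint, so both sides hash to the same wait channel.
 *
 *	waiter:	while (!obj.ready)
 *			_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, &obj, NULL);
 *	waker:	obj.ready = 1;
 *		_lwp_unpark(waiter_lid, &obj);
 */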
int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else
		tp = kmem_alloc(sz, KM_SLEEP);
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set it running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or it
		 * may be parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	size_t len;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, t->l_name, sizeof(name));
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	len = uimin(SCARG(uap, len), sizeof(name));

	return copyoutstr(name, SCARG(uap, name), len, NULL);
}

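/*
 * Negotiate an lwpctl communication area shared with userland.  Only
 * LWPCTL_FEATURE_CURCPU and LWPCTL_FEATURE_PCTR exist; requesting any
 * other feature bit fails with ENODEV.  A hypothetical caller:
 *
 *	struct lwpctl *lc;
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0)
 *		cpu = lc->lc_curcpu;	(kept current across context switches)
 */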
871 sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
872     register_t *retval)
873 {
874 	/* {
875 		syscallarg(int)			features;
876 		syscallarg(struct lwpctl **)	address;
877 	} */
878 	int error, features;
879 	vaddr_t vaddr;
880 
881 	features = SCARG(uap, features);
882 	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
883 	if (features != 0)
884 		return ENODEV;
885 	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
886 		return error;
887 	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
888 }
889