/*	$NetBSD: sys_lwp.c,v 1.30 2007/11/12 23:11:59 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.30 2007/11/12 23:11:59 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>

#include <uvm/uvm_extern.h>

/* Maximum number of LWPs unparked by a single _lwp_unpark_all() call. */
#define	LWP_UNPARK_MAX		1024

syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

sleeptab_t	lwp_park_tab;

void
lwp_sys_init(void)
{

	sleeptab_init(&lwp_park_tab);
}
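
/*
 * For orientation, a hedged sketch of how userland typically drives
 * _lwp_create(): the ucontext is prepared with _lwp_makecontext(3).
 * start_fn, arg, stack_base, and stack_size below are illustrative
 * placeholders, not part of this file.
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	_lwp_makecontext(&uc, start_fn, arg, NULL, stack_base, stack_size);
 *	if (_lwp_create(&uc, 0, &lid) != 0)
 *		err(EXIT_FAILURE, "_lwp_create");
 */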

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
	} else
		l2->l_stat = LSSUSPENDED;
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	return 0;
}

/* Exit the calling LWP. */
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

/* Return the LWP identifier of the calling LWP. */
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

/* Return the calling LWP's user-level private data pointer. */
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

/* Set the calling LWP's user-level private data pointer. */
int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);
	return 0;
}

int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(&p->p_smutex);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(&p->p_smutex);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(&p->p_smutex);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t);
		error = 0;
	}

	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(&p->p_smutex);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_kill_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(&proclist_mutex);
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(&p->p_smutex);
	mutex_exit(&proclist_mutex);

	return error;
}

int
sys__lwp_detach(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_detach_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(&p->p_smutex);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(&p->p_smutex);

	return error;
}

/*
 * Compute the sleep queue wait channel for a park operation: the owning
 * process XORed with the user-supplied hint.  Collisions are harmless,
 * since waiters on the queue are matched on both proc and LID.
 */
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	int swapin;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set it running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		swapin = sleepq_remove(sq, t);
		sleepq_unlock(sq);
		if (swapin)
			uvm_kick_scheduler();
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	sleepq_unlock(sq);

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(&p->p_smutex);
	return 0;
}

int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		sleepq_unlock(sq);
		return EALREADY;
	}
	lwp_unlock_to(l, sq->sq_mutex);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'Park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
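
/*
 * For orientation, a hedged sketch of the userland handshake these
 * syscalls support (waiter_lid, obj, condition, and the surrounding
 * synchronisation are illustrative placeholders, not part of this file):
 *
 *	// Waiter: sleep until the condition holds, keyed on obj.
 *	while (!condition)
 *		(void)_lwp_park(NULL, 0, &obj, NULL);
 *
 *	// Waker: make the condition true, then release the waiter.
 *	condition = true;
 *	(void)_lwp_unpark(waiter_lid, &obj);
 */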

int
sys__lwp_park(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_park_args /* {
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */ *uap = v;
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

int
sys__lwp_unpark(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_unpark_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */ *uap = v;

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}
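
/*
 * For orientation, a hedged sketch of block-wise use of _lwp_unpark_all()
 * from userland (lids, nlids, and obj are illustrative placeholders):
 * a NULL target array asks the kernel for its per-call limit, and larger
 * sets are then submitted in blocks of at most that size.
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *
 *	for (size_t i = 0; i < nlids; i += max)
 *		(void)_lwp_unpark_all(lids + i, MIN(nlids - i, (size_t)max),
 *		    &obj);
 */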

int
sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_unpark_all_args /* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		KERNEL_LOCK(1, l);		/* XXXSMP */
		tp = kmem_alloc(sz, KM_SLEEP);
		KERNEL_UNLOCK_ONE(l);		/* XXXSMP */
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			KERNEL_LOCK(1, l);	/* XXXSMP */
			kmem_free(tp, sz);
			KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
		}
		return error;
	}

	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set it running.
		 */
		TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(&p->p_smutex);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(&p->p_smutex);
			sleepq_lock(sq);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(&p->p_smutex);
		sleepq_lock(sq);
	}

	sleepq_unlock(sq);
	if (tp != targets) {
		KERNEL_LOCK(1, l);		/* XXXSMP */
		kmem_free(tp, sz);
		KERNEL_UNLOCK_ONE(l);		/* XXXSMP */
	}
	if (swapin)
		uvm_kick_scheduler();

	return 0;
}

int
sys__lwp_setname(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setname_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */ *uap = v;
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(&p->p_smutex);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_getname_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */ *uap = v;
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

/*
 * Allocate an lwpctl communication area for the calling LWP: memory
 * shared between kernel and userspace through which the kernel
 * publishes per-LWP state (currently the CPU the LWP last ran on).
 */
int
sys__lwp_ctl(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_ctl_args /* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */ *uap = v;
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	if ((features & ~LWPCTL_FEATURE_CURCPU) != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
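
/*
 * For orientation, a hedged sketch of userland use of _lwp_ctl() (the
 * lc_curcpu field is taken from struct lwpctl in <sys/lwpctl.h>; error
 * handling is elided):
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0)
 *		printf("last ran on CPU %d\n", lc->lc_curcpu);
 */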