/*	$NetBSD: sys_lwp.c,v 1.34 2008/02/14 14:26:57 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.34 2008/02/14 14:26:57 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{

	sleeptab_init(&lwp_park_tab);
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
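	 *
	 * Editorial note: on the run path, the KASSERT below checks that
	 * l2 is covered by the scheduler lock that sched_enqueue()
	 * expects; on the suspend path, lwp_unlock_to() hands l2's lock
	 * over to its CPU's spc_lwplock instead.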
	 */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(&p->p_smutex);

	return 0;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
	return 0;
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(&p->p_smutex);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
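	 *
	 * Editorial note: lwp_suspend() only requests the suspension; the
	 * wait loop below sleeps on p_lwpcv until the target has actually
	 * reached LSSUSPENDED, has exited, or the request was cancelled.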
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(&p->p_smutex);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(&p->p_smutex);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/*
		 * Wake it up.  lwp_unsleep() will release the LWP lock.
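		 * This path requires the target to be in an interruptible
		 * sleep (LW_SINTR), which was verified just above.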
		 */
		lwp_unsleep(t);
		error = 0;
	}

	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(&p->p_smutex);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(&proclist_mutex);
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(&p->p_smutex);
	mutex_exit(&proclist_mutex);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(&p->p_smutex);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(&p->p_smutex);

	return error;
}

static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	int swapin;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
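	 *
	 * Note: the wait channel is computed by lwp_park_wchan() above as
	 * the process pointer XORed with the user-supplied hint, so the
	 * parking and unparking LWPs must pass the same hint to hash to
	 * the same sleep queue.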
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		swapin = sleepq_remove(sq, t);
		sleepq_unlock(sq);
		if (swapin)
			uvm_kick_scheduler();
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	sleepq_unlock(sq);

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(&p->p_smutex);
	return 0;
}

int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		sleepq_unlock(sq);
		return EALREADY;
	}
	lwp_unlock_to(l, sq->sq_mutex);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
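 *
 * Illustrative userland pattern (a hypothetical sketch, not part of this
 * file; 'pred', 'obj' and 'waiter' are assumed names):
 *
 *	while (!pred)				(waiter)
 *		_lwp_park(NULL, 0, &obj, NULL);
 *
 *	pred = 1;				(waker)
 *	_lwp_unpark(waiter, &obj);
 *
 * A pending unpark is remembered via LW_UNPARKED, so the waiter cannot
 * miss a wakeup that arrives between testing 'pred' and parking.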
 */
int
sys__lwp_park(struct lwp *l, const struct sys__lwp_park_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(&p->p_smutex);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(&p->p_smutex);
			sleepq_lock(sq);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
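			 * (It returns EALREADY; see the LW_UNPARKED check
			 * at the top of lwp_park().)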
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(&p->p_smutex);
		sleepq_lock(sq);
	}

	sleepq_unlock(sq);
	if (tp != targets)
		kmem_free(tp, sz);
	if (swapin)
		uvm_kick_scheduler();

	return 0;
}

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(&p->p_smutex);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	if ((features & ~LWPCTL_FEATURE_CURCPU) != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
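
/*
 * Illustrative (hypothetical) userland use of _lwp_ctl(2), sketched on
 * the assumption that struct lwpctl exposes an lc_curcpu field: map the
 * shared per-LWP communication area and read the CPU the LWP last ran on.
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0)
 *		printf("last ran on CPU %d\n", (int)lc->lc_curcpu);
 */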