/* $NetBSD: sys_lwp.c,v 1.45 2009/03/29 09:24:52 ad Exp $ */

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.45 2009/03/29 09:24:52 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

#define LWP_UNPARK_MAX		1024

syncobj_t lwp_park_sobj = {
        SOBJ_SLEEPQ_LIFO,
        sleepq_unsleep,
        sleepq_changepri,
        sleepq_lendpri,
        syncobj_noowner,
};

sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
        sleeptab_init(&lwp_park_tab);
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(const ucontext_t *) ucp;
                syscallarg(u_long) flags;
                syscallarg(lwpid_t *) new_lwp;
        } */
        struct proc *p = l->l_proc;
        struct lwp *l2;
        vaddr_t uaddr;
        bool inmem;
        ucontext_t *newuc;
        int error, lid;

#ifdef KERN_SA
        mutex_enter(p->p_lock);
        if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
                mutex_exit(p->p_lock);
                return EINVAL;
        }
        mutex_exit(p->p_lock);
#endif

        newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

        error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
        if (error) {
                pool_put(&lwp_uc_pool, newuc);
                return error;
        }

        /* XXX check against resource limits */

        inmem = uvm_uarea_alloc(&uaddr);
        if (__predict_false(uaddr == 0)) {
                pool_put(&lwp_uc_pool, newuc);
                return ENOMEM;
        }

        error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
            NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
        if (error) {
                uvm_uarea_free(uaddr, curcpu());
                pool_put(&lwp_uc_pool, newuc);
                return error;
        }

        lid = l2->l_lid;
        error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
        if (error) {
                lwp_exit(l2);
                pool_put(&lwp_uc_pool, newuc);
                return error;
        }

        /*
         * Set the new LWP running, unless the caller has requested that
         * it be created in suspended state.  If the process is stopping,
         * then the LWP is created stopped.
         */
        mutex_enter(p->p_lock);
        lwp_lock(l2);
        if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
            (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
                if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
                        l2->l_stat = LSSTOP;
                else {
                        KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
                        p->p_nrlwps++;
                        l2->l_stat = LSRUN;
                        sched_enqueue(l2, false);
                }
                lwp_unlock(l2);
        } else {
                l2->l_stat = LSSUSPENDED;
                lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
        }
        mutex_exit(p->p_lock);

        return 0;
}

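/*
 * Illustrative sketch only (not part of this file): a minimal userland
 * sequence that exercises _lwp_create(2).  The _lwp_makecontext(3) helper
 * and the error handling shown are assumptions about the caller, not
 * something this kernel code requires.
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	getcontext(&uc);
 *	_lwp_makecontext(&uc, start_routine, arg, NULL, stack, stacksize);
 *	if (_lwp_create(&uc, LWP_DETACHED, &lid) != 0)
 *		err(EXIT_FAILURE, "_lwp_create");
 */
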
int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

        lwp_exit(l);
        return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

        *retval = l->l_lid;
        return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

        *retval = (uintptr_t)l->l_private;
        return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(void *) ptr;
        } */

        l->l_private = SCARG(uap, ptr);
#ifdef __HAVE_CPU_LWP_SETPRIVATE
        cpu_lwp_setprivate(l, SCARG(uap, ptr));
#endif

        return 0;
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        struct proc *p = l->l_proc;
        struct lwp *t;
        int error;

        mutex_enter(p->p_lock);

#ifdef KERN_SA
        if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
                mutex_exit(p->p_lock);
                return EINVAL;
        }
#endif

        if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        /*
         * Check for deadlock, which is only possible when we're suspending
         * ourself.  XXX There is a short race here, as p_nrlwps is only
         * decremented when an LWP suspends itself on the kernel/user
         * boundary.  It's still possible to kill -9 the process so we
         * don't bother checking further.
         */
        lwp_lock(t);
        if ((t == l && p->p_nrlwps == 1) ||
            (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
                lwp_unlock(t);
                mutex_exit(p->p_lock);
                return EDEADLK;
        }

        /*
         * Suspend the LWP.  XXX If it's on a different CPU, we should wait
         * for it to be preempted, where it will put itself to sleep.
         *
         * Suspension of the current LWP will happen on return to userspace.
         */
        error = lwp_suspend(l, t);
        if (error) {
                mutex_exit(p->p_lock);
                return error;
        }

        /*
         * Wait for:
         *  o process exiting
         *  o target LWP suspended
         *  o target LWP not suspended and L_WSUSPEND clear
         *  o target LWP exited
         */
        for (;;) {
                error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
                if (error) {
                        error = ERESTART;
                        break;
                }
                if (lwp_find(p, SCARG(uap, target)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
                        error = ERESTART;
                        break;
                }
                if (t->l_stat == LSSUSPENDED ||
                    (t->l_flag & LW_WSUSPEND) == 0)
                        break;
        }
        mutex_exit(p->p_lock);

        return error;
}

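/*
 * Illustrative sketch only (an assumption about typical callers, not part
 * of this file): _lwp_suspend(2) and _lwp_continue(2) pair up from
 * userland roughly as
 *
 *	if (_lwp_suspend(lid) != 0)
 *		err(EXIT_FAILURE, "_lwp_suspend");
 *	... inspect or modify the stopped LWP ...
 *	if (_lwp_continue(lid) != 0)
 *		err(EXIT_FAILURE, "_lwp_continue");
 *
 * with the caveat documented above: a self-suspend only completes on the
 * way back out to userspace.
 */
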
int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        int error;
        struct proc *p = l->l_proc;
        struct lwp *t;

        error = 0;

        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        lwp_lock(t);
        lwp_continue(t);
        mutex_exit(p->p_lock);

        return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        struct lwp *t;
        struct proc *p;
        int error;

        p = l->l_proc;
        mutex_enter(p->p_lock);

        if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        lwp_lock(t);
        t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

        if (t->l_stat != LSSLEEP) {
                lwp_unlock(t);
                error = ENODEV;
        } else if ((t->l_flag & LW_SINTR) == 0) {
                lwp_unlock(t);
                error = EBUSY;
        } else {
                /* Wake it up.  lwp_unsleep() will release the LWP lock. */
                (void)lwp_unsleep(t, true);
                error = 0;
        }

        mutex_exit(p->p_lock);

        return error;
}

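/*
 * Illustrative note (an assumption about callers, not part of this file):
 * _lwp_wakeup(2) acts as a cancellation nudge.  If the target is blocked
 * in an interruptible sleep (LW_SINTR), the sleep is cut short and the
 * interrupted system call typically fails with EINTR; if the target has
 * not blocked yet, the LW_CANCELLED/LW_UNPARKED flags set above remain
 * pending so the wakeup is not lost.
 *
 *	(void)_lwp_wakeup(lid);
 */
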
int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) wait_for;
                syscallarg(lwpid_t *) departed;
        } */
        struct proc *p = l->l_proc;
        int error;
        lwpid_t dep;

        mutex_enter(p->p_lock);
        error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
        mutex_exit(p->p_lock);

        if (error)
                return error;

        if (SCARG(uap, departed)) {
                error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
                if (error)
                        return error;
        }

        return 0;
}

/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
                syscallarg(int) signo;
        } */
        struct proc *p = l->l_proc;
        struct lwp *t;
        ksiginfo_t ksi;
        int signo = SCARG(uap, signo);
        int error = 0;

        if ((u_int)signo >= NSIG)
                return EINVAL;

        KSI_INIT(&ksi);
        ksi.ksi_signo = signo;
        ksi.ksi_code = SI_LWP;
        ksi.ksi_pid = p->p_pid;
        ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
        ksi.ksi_lid = SCARG(uap, target);

        mutex_enter(proc_lock);
        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
                error = ESRCH;
        else if (signo != 0)
                kpsignal2(p, &ksi);
        mutex_exit(p->p_lock);
        mutex_exit(proc_lock);

        return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        struct proc *p;
        struct lwp *t;
        lwpid_t target;
        int error;

        target = SCARG(uap, target);
        p = l->l_proc;

        mutex_enter(p->p_lock);

        if (l->l_lid == target)
                t = l;
        else {
                /*
                 * We can't use lwp_find() here because the target might
                 * be a zombie.
                 */
                LIST_FOREACH(t, &p->p_lwps, l_sibling)
                        if (t->l_lid == target)
                                break;
        }

        /*
         * If the LWP is already detached, there's nothing to do.
         * If it's a zombie, we need to clean up after it.  LSZOMB
         * is visible with the proc mutex held.
         *
         * After we have detached or released the LWP, kick any
         * other LWPs that may be sitting in _lwp_wait(), waiting
         * for the target LWP to exit.
         */
        if (t != NULL && t->l_stat != LSIDL) {
                if ((t->l_prflag & LPR_DETACHED) == 0) {
                        p->p_ndlwps++;
                        t->l_prflag |= LPR_DETACHED;
                        if (t->l_stat == LSZOMB) {
                                /* Releases proc mutex. */
                                lwp_free(t, false, false);
                                return 0;
                        }
                        error = 0;

                        /*
                         * Have any LWPs sleeping in lwp_wait() recheck
                         * for deadlock.
                         */
                        cv_broadcast(&p->p_lwpcv);
                } else
                        error = EINVAL;
        } else
                error = ESRCH;

        mutex_exit(p->p_lock);

        return error;
}

static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

        return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
        sleepq_t *sq;
        wchan_t wchan;
        int swapin;
        kmutex_t *mp;
        proc_t *p;
        lwp_t *t;

        /*
         * Easy case: search for the LWP on the sleep queue.  If
         * it's parked, remove it from the queue and set it running.
         */
        p = curproc;
        wchan = lwp_park_wchan(p, hint);
        sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

        TAILQ_FOREACH(t, sq, l_sleepchain)
                if (t->l_proc == p && t->l_lid == target)
                        break;

        if (__predict_true(t != NULL)) {
                swapin = sleepq_remove(sq, t);
                mutex_spin_exit(mp);
                if (swapin)
                        uvm_kick_scheduler();
                return 0;
        }

        /*
         * The LWP hasn't parked yet.  Take the hit and mark the
         * operation as pending.
         */
        mutex_spin_exit(mp);

        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, target)) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        /*
         * It may not have parked yet, we may have raced, or it
         * is parked on a different user sync object.
         */
        lwp_lock(t);
        if (t->l_syncobj == &lwp_park_sobj) {
                /* Releases the LWP lock. */
                (void)lwp_unsleep(t, true);
        } else {
                /*
                 * Set the operation pending.  The next call to _lwp_park
                 * will return early.
                 */
                t->l_flag |= LW_UNPARKED;
                lwp_unlock(t);
        }

        mutex_exit(p->p_lock);
        return 0;
}

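/*
 * Descriptive note (added commentary, not in the original sources): park
 * and unpark agree on a wait channel by XORing the process pointer with
 * the user-supplied hint, so sleepers from unrelated processes that pass
 * the same hint land on distinct channels.  The race where the waker runs
 * before the sleeper has enqueued itself is closed by LW_UNPARKED: the
 * unparker leaves the flag set, and lwp_park() below consumes it and
 * returns EALREADY instead of blocking.
 */
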
int
lwp_park(struct timespec *ts, const void *hint)
{
        struct timespec tsx;
        sleepq_t *sq;
        kmutex_t *mp;
        wchan_t wchan;
        int timo, error;
        lwp_t *l;

        /* Fix up the given timeout value. */
        if (ts != NULL) {
                getnanotime(&tsx);
                timespecsub(ts, &tsx, &tsx);
                if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
                        return ETIMEDOUT;
                if ((error = itimespecfix(&tsx)) != 0)
                        return error;
                timo = tstohz(&tsx);
                KASSERT(timo != 0);
        } else
                timo = 0;

        /* Find and lock the sleep queue. */
        l = curlwp;
        wchan = lwp_park_wchan(l->l_proc, hint);
        sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

        /*
         * Before going the full route and blocking, check to see if an
         * unpark op is pending.
         */
        lwp_lock(l);
        if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
                l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
                lwp_unlock(l);
                mutex_spin_exit(mp);
                return EALREADY;
        }
        lwp_unlock_to(l, mp);
        l->l_biglocks = 0;
        sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
        error = sleepq_block(timo, true);
        switch (error) {
        case EWOULDBLOCK:
                error = ETIMEDOUT;
                break;
        case ERESTART:
                error = EINTR;
                break;
        default:
                /* nothing */
                break;
        }
        return error;
}

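/*
 * Illustrative sketch only (assumed userland caller, not part of this
 * file): the park/unpark pair is the futex-like primitive underneath
 * libpthread's sleep queues.  A waiter does roughly
 *
 *	while (!condition_is_true(obj)) {
 *		record_self_as_waiter(obj);
 *		(void)_lwp_park(ts, 0, obj, NULL);
 *	}
 *
 * and a waker, having dequeued the waiter's LWP ID, does
 *
 *	(void)_lwp_unpark(waiter_lid, obj);
 *
 * Spurious returns (EINTR, EALREADY, ETIMEDOUT) are expected; the loop
 * re-checks the predicate.  'obj', 'ts' and the helper names here are
 * placeholders.
 */
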
598 */ 599 int 600 sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap, 601 register_t *retval) 602 { 603 /* { 604 syscallarg(const struct timespec *) ts; 605 syscallarg(lwpid_t) unpark; 606 syscallarg(const void *) hint; 607 syscallarg(const void *) unparkhint; 608 } */ 609 struct timespec ts, *tsp; 610 int error; 611 612 if (SCARG(uap, ts) == NULL) 613 tsp = NULL; 614 else { 615 error = copyin(SCARG(uap, ts), &ts, sizeof(ts)); 616 if (error != 0) 617 return error; 618 tsp = &ts; 619 } 620 621 if (SCARG(uap, unpark) != 0) { 622 error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint)); 623 if (error != 0) 624 return error; 625 } 626 627 return lwp_park(tsp, SCARG(uap, hint)); 628 } 629 630 int 631 sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap, register_t *retval) 632 { 633 /* { 634 syscallarg(lwpid_t) target; 635 syscallarg(const void *) hint; 636 } */ 637 638 return lwp_unpark(SCARG(uap, target), SCARG(uap, hint)); 639 } 640 641 int 642 sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap, register_t *retval) 643 { 644 /* { 645 syscallarg(const lwpid_t *) targets; 646 syscallarg(size_t) ntargets; 647 syscallarg(const void *) hint; 648 } */ 649 struct proc *p; 650 struct lwp *t; 651 sleepq_t *sq; 652 wchan_t wchan; 653 lwpid_t targets[32], *tp, *tpp, *tmax, target; 654 int swapin, error; 655 kmutex_t *mp; 656 u_int ntargets; 657 size_t sz; 658 659 p = l->l_proc; 660 ntargets = SCARG(uap, ntargets); 661 662 if (SCARG(uap, targets) == NULL) { 663 /* 664 * Let the caller know how much we are willing to do, and 665 * let it unpark the LWPs in blocks. 666 */ 667 *retval = LWP_UNPARK_MAX; 668 return 0; 669 } 670 if (ntargets > LWP_UNPARK_MAX || ntargets == 0) 671 return EINVAL; 672 673 /* 674 * Copy in the target array. If it's a small number of LWPs, then 675 * place the numbers on the stack. 676 */ 677 sz = sizeof(target) * ntargets; 678 if (sz <= sizeof(targets)) 679 tp = targets; 680 else { 681 tp = kmem_alloc(sz, KM_SLEEP); 682 if (tp == NULL) 683 return ENOMEM; 684 } 685 error = copyin(SCARG(uap, targets), tp, sz); 686 if (error != 0) { 687 if (tp != targets) { 688 kmem_free(tp, sz); 689 } 690 return error; 691 } 692 693 swapin = 0; 694 wchan = lwp_park_wchan(p, SCARG(uap, hint)); 695 sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp); 696 697 for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) { 698 target = *tpp; 699 700 /* 701 * Easy case: search for the LWP on the sleep queue. If 702 * it's parked, remove it from the queue and set running. 703 */ 704 TAILQ_FOREACH(t, sq, l_sleepchain) 705 if (t->l_proc == p && t->l_lid == target) 706 break; 707 708 if (t != NULL) { 709 swapin |= sleepq_remove(sq, t); 710 continue; 711 } 712 713 /* 714 * The LWP hasn't parked yet. Take the hit and 715 * mark the operation as pending. 716 */ 717 mutex_spin_exit(mp); 718 mutex_enter(p->p_lock); 719 if ((t = lwp_find(p, target)) == NULL) { 720 mutex_exit(p->p_lock); 721 mutex_spin_enter(mp); 722 continue; 723 } 724 lwp_lock(t); 725 726 /* 727 * It may not have parked yet, we may have raced, or 728 * it is parked on a different user sync object. 729 */ 730 if (t->l_syncobj == &lwp_park_sobj) { 731 /* Releases the LWP lock. */ 732 (void)lwp_unsleep(t, true); 733 } else { 734 /* 735 * Set the operation pending. The next call to 736 * _lwp_park will return early. 
737 */ 738 t->l_flag |= LW_UNPARKED; 739 lwp_unlock(t); 740 } 741 742 mutex_exit(p->p_lock); 743 mutex_spin_enter(mp); 744 } 745 746 mutex_spin_exit(mp); 747 if (tp != targets) 748 kmem_free(tp, sz); 749 if (swapin) 750 uvm_kick_scheduler(); 751 752 return 0; 753 } 754 755 int 756 sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap, register_t *retval) 757 { 758 /* { 759 syscallarg(lwpid_t) target; 760 syscallarg(const char *) name; 761 } */ 762 char *name, *oname; 763 lwpid_t target; 764 proc_t *p; 765 lwp_t *t; 766 int error; 767 768 if ((target = SCARG(uap, target)) == 0) 769 target = l->l_lid; 770 771 name = kmem_alloc(MAXCOMLEN, KM_SLEEP); 772 if (name == NULL) 773 return ENOMEM; 774 error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL); 775 switch (error) { 776 case ENAMETOOLONG: 777 case 0: 778 name[MAXCOMLEN - 1] = '\0'; 779 break; 780 default: 781 kmem_free(name, MAXCOMLEN); 782 return error; 783 } 784 785 p = curproc; 786 mutex_enter(p->p_lock); 787 if ((t = lwp_find(p, target)) == NULL) { 788 mutex_exit(p->p_lock); 789 kmem_free(name, MAXCOMLEN); 790 return ESRCH; 791 } 792 lwp_lock(t); 793 oname = t->l_name; 794 t->l_name = name; 795 lwp_unlock(t); 796 mutex_exit(p->p_lock); 797 798 if (oname != NULL) 799 kmem_free(oname, MAXCOMLEN); 800 801 return 0; 802 } 803 804 int 805 sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap, register_t *retval) 806 { 807 /* { 808 syscallarg(lwpid_t) target; 809 syscallarg(char *) name; 810 syscallarg(size_t) len; 811 } */ 812 char name[MAXCOMLEN]; 813 lwpid_t target; 814 proc_t *p; 815 lwp_t *t; 816 817 if ((target = SCARG(uap, target)) == 0) 818 target = l->l_lid; 819 820 p = curproc; 821 mutex_enter(p->p_lock); 822 if ((t = lwp_find(p, target)) == NULL) { 823 mutex_exit(p->p_lock); 824 return ESRCH; 825 } 826 lwp_lock(t); 827 if (t->l_name == NULL) 828 name[0] = '\0'; 829 else 830 strcpy(name, t->l_name); 831 lwp_unlock(t); 832 mutex_exit(p->p_lock); 833 834 return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL); 835 } 836 837 int 838 sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap, register_t *retval) 839 { 840 /* { 841 syscallarg(int) features; 842 syscallarg(struct lwpctl **) address; 843 } */ 844 int error, features; 845 vaddr_t vaddr; 846 847 features = SCARG(uap, features); 848 features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR); 849 if (features != 0) 850 return ENODEV; 851 if ((error = lwp_ctl_alloc(&vaddr)) != 0) 852 return error; 853 return copyout(&vaddr, SCARG(uap, address), sizeof(void *)); 854 } 855