/*	$NetBSD: sys_lwp.c,v 1.43 2008/10/16 08:47:07 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.43 2008/10/16 08:47:07 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

#define	LWP_UNPARK_MAX		1024

/* Sync object used by LWPs sleeping in _lwp_park(). */
syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

/* Sleep queues for parked LWPs. */
sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	/* LWP creation is not permitted while scheduler activations
	 * are in use, or while the process is exiting. */
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) &
	    LWP_DETACHED, NULL, 0, p->p_emul->e_startlwp, newuc, &l2,
	    l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2,
			    l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
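
/*
 * Example of driving this syscall from userland (a hypothetical sketch;
 * _lwp_makecontext(3) fills in the machine-dependent parts of the
 * ucontext, and 'start', 'arg', 'stack' and 'stacksize' are invented
 * names for illustration):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	_lwp_makecontext(&uc, start, arg, NULL, stack, stacksize);
 *	if (_lwp_create(&uc, 0, &lid) == 0)
 *		... the new LWP 'lid' is now runnable ...
 */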

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
	return 0;
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}
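
/*
 * Note that the wait loop above makes suspension synchronous: a
 * successful return means the target has actually stopped, not merely
 * that a stop has been requested.  A hypothetical userland sketch:
 *
 *	if (_lwp_suspend(lid) == 0) {
 *		... target is stopped until _lwp_continue(lid) ...
 *	}
 */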

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		/* Not sleeping: nothing to wake. */
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		/* Sleeping, but not interruptibly. */
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		(void)lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}
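
/*
 * Wait for an LWP in the current process to exit.  Per the _lwp_wait(2)
 * interface, a target of 0 means "wait for any LWP"; the ID of the LWP
 * that exited is returned via 'departed'.  A hypothetical userland
 * sketch:
 *
 *	lwpid_t dep;
 *
 *	if (_lwp_wait(0, &dep) == 0)
 *		... LWP 'dep' has exited and been reaped ...
 */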

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

/*
 * Compute the sleep queue wait channel for a park operation: the
 * process pointer XOR'd with the user-supplied hint.
 */
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}
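
/*
 * XORing in the proc pointer keeps park channels from different
 * processes apart even when they pass the same hint.  Distinct hints
 * within one process may still collide on a wait channel, which is
 * harmless: the wakeup paths below match on both l_proc and l_lid
 * before removing anything from the queue.
 */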

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	int swapin;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		swapin = sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		if (swapin)
			uvm_kick_scheduler();
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		(void)lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
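
/*
 * The park/unpark interface is intended as a building block for
 * userland synchronisation primitives.  A hypothetical lock-wait
 * sketch (try_acquire, note_sleeper, have_sleepers and sleeper_lid
 * are invented names for illustration):
 *
 *	while (!try_acquire(lock)) {
 *		note_sleeper(lock);
 *		(void)_lwp_park(NULL, 0, lock, NULL);
 *	}
 *
 * and on release:
 *
 *	if (have_sleepers(lock))
 *		(void)_lwp_unpark(sleeper_lid(lock), lock);
 *
 * Because an unpark may overtake the corresponding park (handled via
 * LW_UNPARKED above), callers must expect EALREADY, EINTR and
 * ETIMEDOUT returns and recheck the condition in a loop.
 */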
593 */ 594 int 595 sys__lwp_park(struct lwp *l, const struct sys__lwp_park_args *uap, register_t *retval) 596 { 597 /* { 598 syscallarg(const struct timespec *) ts; 599 syscallarg(lwpid_t) unpark; 600 syscallarg(const void *) hint; 601 syscallarg(const void *) unparkhint; 602 } */ 603 struct timespec ts, *tsp; 604 int error; 605 606 if (SCARG(uap, ts) == NULL) 607 tsp = NULL; 608 else { 609 error = copyin(SCARG(uap, ts), &ts, sizeof(ts)); 610 if (error != 0) 611 return error; 612 tsp = &ts; 613 } 614 615 if (SCARG(uap, unpark) != 0) { 616 error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint)); 617 if (error != 0) 618 return error; 619 } 620 621 return lwp_park(tsp, SCARG(uap, hint)); 622 } 623 624 int 625 sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap, register_t *retval) 626 { 627 /* { 628 syscallarg(lwpid_t) target; 629 syscallarg(const void *) hint; 630 } */ 631 632 return lwp_unpark(SCARG(uap, target), SCARG(uap, hint)); 633 } 634 635 int 636 sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap, register_t *retval) 637 { 638 /* { 639 syscallarg(const lwpid_t *) targets; 640 syscallarg(size_t) ntargets; 641 syscallarg(const void *) hint; 642 } */ 643 struct proc *p; 644 struct lwp *t; 645 sleepq_t *sq; 646 wchan_t wchan; 647 lwpid_t targets[32], *tp, *tpp, *tmax, target; 648 int swapin, error; 649 kmutex_t *mp; 650 u_int ntargets; 651 size_t sz; 652 653 p = l->l_proc; 654 ntargets = SCARG(uap, ntargets); 655 656 if (SCARG(uap, targets) == NULL) { 657 /* 658 * Let the caller know how much we are willing to do, and 659 * let it unpark the LWPs in blocks. 660 */ 661 *retval = LWP_UNPARK_MAX; 662 return 0; 663 } 664 if (ntargets > LWP_UNPARK_MAX || ntargets == 0) 665 return EINVAL; 666 667 /* 668 * Copy in the target array. If it's a small number of LWPs, then 669 * place the numbers on the stack. 670 */ 671 sz = sizeof(target) * ntargets; 672 if (sz <= sizeof(targets)) 673 tp = targets; 674 else { 675 tp = kmem_alloc(sz, KM_SLEEP); 676 if (tp == NULL) 677 return ENOMEM; 678 } 679 error = copyin(SCARG(uap, targets), tp, sz); 680 if (error != 0) { 681 if (tp != targets) { 682 kmem_free(tp, sz); 683 } 684 return error; 685 } 686 687 swapin = 0; 688 wchan = lwp_park_wchan(p, SCARG(uap, hint)); 689 sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp); 690 691 for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) { 692 target = *tpp; 693 694 /* 695 * Easy case: search for the LWP on the sleep queue. If 696 * it's parked, remove it from the queue and set running. 697 */ 698 TAILQ_FOREACH(t, sq, l_sleepchain) 699 if (t->l_proc == p && t->l_lid == target) 700 break; 701 702 if (t != NULL) { 703 swapin |= sleepq_remove(sq, t); 704 continue; 705 } 706 707 /* 708 * The LWP hasn't parked yet. Take the hit and 709 * mark the operation as pending. 710 */ 711 mutex_spin_exit(mp); 712 mutex_enter(p->p_lock); 713 if ((t = lwp_find(p, target)) == NULL) { 714 mutex_exit(p->p_lock); 715 mutex_spin_enter(mp); 716 continue; 717 } 718 lwp_lock(t); 719 720 /* 721 * It may not have parked yet, we may have raced, or 722 * it is parked on a different user sync object. 723 */ 724 if (t->l_syncobj == &lwp_park_sobj) { 725 /* Releases the LWP lock. */ 726 (void)lwp_unsleep(t, true); 727 } else { 728 /* 729 * Set the operation pending. The next call to 730 * _lwp_park will return early. 
731 */ 732 t->l_flag |= LW_UNPARKED; 733 lwp_unlock(t); 734 } 735 736 mutex_exit(p->p_lock); 737 mutex_spin_enter(mp); 738 } 739 740 mutex_spin_exit(mp); 741 if (tp != targets) 742 kmem_free(tp, sz); 743 if (swapin) 744 uvm_kick_scheduler(); 745 746 return 0; 747 } 748 749 int 750 sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap, register_t *retval) 751 { 752 /* { 753 syscallarg(lwpid_t) target; 754 syscallarg(const char *) name; 755 } */ 756 char *name, *oname; 757 lwpid_t target; 758 proc_t *p; 759 lwp_t *t; 760 int error; 761 762 if ((target = SCARG(uap, target)) == 0) 763 target = l->l_lid; 764 765 name = kmem_alloc(MAXCOMLEN, KM_SLEEP); 766 if (name == NULL) 767 return ENOMEM; 768 error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL); 769 switch (error) { 770 case ENAMETOOLONG: 771 case 0: 772 name[MAXCOMLEN - 1] = '\0'; 773 break; 774 default: 775 kmem_free(name, MAXCOMLEN); 776 return error; 777 } 778 779 p = curproc; 780 mutex_enter(p->p_lock); 781 if ((t = lwp_find(p, target)) == NULL) { 782 mutex_exit(p->p_lock); 783 kmem_free(name, MAXCOMLEN); 784 return ESRCH; 785 } 786 lwp_lock(t); 787 oname = t->l_name; 788 t->l_name = name; 789 lwp_unlock(t); 790 mutex_exit(p->p_lock); 791 792 if (oname != NULL) 793 kmem_free(oname, MAXCOMLEN); 794 795 return 0; 796 } 797 798 int 799 sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap, register_t *retval) 800 { 801 /* { 802 syscallarg(lwpid_t) target; 803 syscallarg(char *) name; 804 syscallarg(size_t) len; 805 } */ 806 char name[MAXCOMLEN]; 807 lwpid_t target; 808 proc_t *p; 809 lwp_t *t; 810 811 if ((target = SCARG(uap, target)) == 0) 812 target = l->l_lid; 813 814 p = curproc; 815 mutex_enter(p->p_lock); 816 if ((t = lwp_find(p, target)) == NULL) { 817 mutex_exit(p->p_lock); 818 return ESRCH; 819 } 820 lwp_lock(t); 821 if (t->l_name == NULL) 822 name[0] = '\0'; 823 else 824 strcpy(name, t->l_name); 825 lwp_unlock(t); 826 mutex_exit(p->p_lock); 827 828 return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL); 829 } 830 831 int 832 sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap, register_t *retval) 833 { 834 /* { 835 syscallarg(int) features; 836 syscallarg(struct lwpctl **) address; 837 } */ 838 int error, features; 839 vaddr_t vaddr; 840 841 features = SCARG(uap, features); 842 features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR); 843 if (features != 0) 844 return ENODEV; 845 if ((error = lwp_ctl_alloc(&vaddr)) != 0) 846 return error; 847 return copyout(&vaddr, SCARG(uap, address), sizeof(void *)); 848 } 849