/*	$NetBSD: sys_lwp.c,v 1.54 2012/05/21 14:15:19 martin Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.54 2012/05/21 14:15:19 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, arg, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc = NULL;
	lwpid_t lid;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	error = do_lwp_create(l, newuc, SCARG(uap, flags), &lid);
	if (error)
		goto fail;

	/*
	 * Do not free the ucontext on an error past this point: the new
	 * LWP is already running and will access it.
	 */
	return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));

fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

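/*
 * Illustrative userland usage (a sketch, not part of this file): a thread
 * library builds a ucontext with _lwp_makecontext(3) and hands it to
 * _lwp_create(2).  "start", "arg", "do_work" and STACK_SIZE below are
 * hypothetical names chosen for the example.
 *
 *	#include <lwp.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *	#include <ucontext.h>
 *
 *	static void
 *	start(void *arg)
 *	{
 *		do_work(arg);
 *		_lwp_exit();
 *	}
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *	void *stack = malloc(STACK_SIZE);
 *
 *	_lwp_makecontext(&uc, start, arg, NULL, stack, STACK_SIZE);
 *	if (_lwp_create(&uc, LWP_DETACHED, &lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_create");
 */
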
int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

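/*
 * The matching userland calls, as a sketch ("target" is a hypothetical
 * LWP ID in the same process).  Note the deadlock check above: an LWP
 * that tries to suspend itself while it is the process's only remaining
 * LWP gets EDEADLK.
 *
 *	#include <lwp.h>
 *
 *	if (_lwp_suspend(target) == -1)
 *		err(EXIT_FAILURE, "_lwp_suspend");
 *	(the target is now suspended; inspect or modify it here)
 *	if (_lwp_continue(target) == -1)
 *		err(EXIT_FAILURE, "_lwp_continue");
 */
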
int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

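/*
 * A note on the hash above: XORing the proc pointer into the wait channel
 * keeps identical hints from different processes on different sleep
 * queues.  Userland is expected to pass the address of the user-level
 * synchronization object as the hint, so both sides of a wakeup name the
 * same queue, e.g. (a sketch; "mtx" and "lid" are hypothetical, and the
 * four-argument _lwp_park(2) of this era is assumed):
 *
 *	_lwp_park(NULL, 0, &mtx, NULL);		parking (waiting) side
 *	_lwp_unpark(lid, &mtx);			waking side
 */
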
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

int
lwp_park(struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		error = abstimeout2timo(ts, &timo);
		if (error) {
			return error;
		}
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

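/*
 * Sketch of the wakeup-loss-free handoff these primitives give userland
 * (a hedged example: "obj", "want_park" and the error handling are
 * hypothetical library details, and the four-argument _lwp_park(2) of
 * this era is assumed).  If the unpark arrives first, LW_UNPARKED is left
 * set and the next park returns EALREADY, so a wakeup issued between the
 * waiter's last condition check and its park is not lost:
 *
 *	while (want_park(obj)) {
 *		if (_lwp_park(NULL, 0, obj, NULL) == 0)
 *			continue;
 *		if (errno == EALREADY || errno == EINTR)
 *			continue;
 *		err(EXIT_FAILURE, "_lwp_park");
 *	}
 */
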
int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

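/*
 * The NULL-targets convention above lets a caller size its batches, as a
 * sketch ("lids", "n" and "obj" are hypothetical; _lwp_unpark_all(2)
 * returns the per-call limit when passed a NULL array):
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *
 *	for (size_t i = 0; i < n; i += max)
 *		if (_lwp_unpark_all(lids + i, MIN(max, (ssize_t)(n - i)),
 *		    obj) == -1)
 *			err(EXIT_FAILURE, "_lwp_unpark_all");
 */
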
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
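
/*
 * Userland side of the lwpctl block, as a sketch (a hedged example based
 * on _lwp_ctl(2) and <sys/lwpctl.h>; the print format is illustrative).
 * Once the block is mapped, the kernel keeps it current across context
 * switches, so the LWP can read its CPU number (lc_curcpu) or notice that
 * it was rescheduled (lc_pctr advances) without making a system call.
 *
 *	#include <sys/lwpctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR, &lc) == -1)
 *		err(EXIT_FAILURE, "_lwp_ctl");
 *	printf("cpu %d, switch counter %d\n", lc->lc_curcpu, lc->lc_pctr);
 */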