/*	$NetBSD: sys_lwp.c,v 1.47 2009/10/22 13:12:47 rmind Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls. See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.47 2009/10/22 13:12:47 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

#define	LWP_UNPARK_MAX		1024

static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
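	/*
	 * On failure no new LWP was created, so the u-area and the
	 * copied-in ucontext must be released here; on success they
	 * now belong to the new LWP.
	 */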
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state. If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2,
			    l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
#ifdef __HAVE_CPU_LWP_SETPRIVATE
	cpu_lwp_setprivate(l, SCARG(uap, ptr));
#endif

	return 0;
}

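/*
 * Suspend the target LWP and wait until the suspension has taken
 * effect (or the target has exited). A request to suspend the calling
 * LWP itself completes on the return path to user space.
 */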
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself. XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary. It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP. XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up. lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

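/*
 * Wait for an LWP in the current process to exit; a wait_for value of
 * zero matches any undetached LWP. The LID of the departed LWP is
 * optionally copied out.
 */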
int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it. LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

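/*
 * Compute the wait channel that a parking LWP sleeps on: the process
 * pointer XORed with the user-supplied hint (typically the address of
 * the user-level synchronisation object), so that different processes
 * and different objects tend to hash onto different sleep queues.
 */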
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue. If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet. Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending. The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object. The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
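/*
 * Purely as an illustration (not part of this file's interface), the
 * intended userspace pattern looks roughly like the sketch below,
 * assuming the _lwp_park()/_lwp_unpark() system call stubs and a
 * caller-chosen object "obj" whose address doubles as the hint:
 *
 *	while (!obj->ready)				(waiter)
 *		(void)_lwp_park(NULL, 0, obj, NULL);
 *
 *	obj->ready = 1;					(waker)
 *	(void)_lwp_unpark(waiter_lid, obj);
 *
 * An unpark that arrives before the corresponding park is not lost:
 * it sets LW_UNPARKED on the target, and the next park returns
 * EALREADY without blocking.
 */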
int
sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array. If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue. If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet. Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending. The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

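/*
 * Set the name of the target LWP. Over-long names are accepted but
 * silently truncated to fit a MAXCOMLEN-byte buffer.
 */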
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}