/*	$NetBSD: sys_lwp.c,v 1.51 2010/06/13 04:13:32 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.51 2010/06/13 04:13:32 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

#define	LWP_UNPARK_MAX	1024

static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

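/*
 * Create a new LWP in the calling process, with its initial user context
 * taken from ucp.  The new LWP's ID is copied out to new_lwp, and it is
 * set running unless LWP_SUSPENDED was requested.
 */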
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		kmem_free(newuc, sizeof(ucontext_t));
		return error;
	}

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		kmem_free(newuc, sizeof(ucontext_t));
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		kmem_free(newuc, sizeof(ucontext_t));
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		kmem_free(newuc, sizeof(ucontext_t));
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
#ifdef __HAVE_CPU_LWP_SETPRIVATE
	cpu_lwp_setprivate(l, SCARG(uap, ptr));
#endif

	return 0;
}

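/*
 * Suspend an LWP in the calling process, then wait until the suspension
 * has taken effect (or the target has gone away).  Self-suspension takes
 * effect on return to userspace; suspending the only running LWP would
 * deadlock the process and fails with EDEADLK.
 */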
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * decremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

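/*
 * Compute the wait channel that a parking LWP sleeps on: the process
 * pointer XORed with the user-supplied hint (typically the address of
 * the sync object being waited on), so identical hints used by
 * different processes normally yield distinct channels.
 */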
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

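/*
 * Park the calling LWP until it is unparked, interrupted, or the
 * absolute timeout in ts expires.  If an unpark or wakeup is already
 * pending, return EALREADY without blocking; a timeout is reported as
 * ETIMEDOUT and interruption as EINTR.
 */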
int
lwp_park(struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		error = abstimeout2timo(ts, &timo);
		if (error) {
			return error;
		}
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
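/*
 * Illustrative sketch (userland, not part of this file) of how a thread
 * library might use park/unpark to block on a user-level lock.  The
 * queueing helpers here are hypothetical; the stubs follow _lwp_park(2)
 * and _lwp_unpark(2):
 *
 *	// Waiter: record ourself, then sleep keyed on the lock address.
 *	enqueue_waiter(lock, _lwp_self());
 *	while (!lock_is_free(lock))
 *		_lwp_park(NULL, 0, lock, NULL);
 *
 *	// Releaser: wake one recorded waiter, passing the same hint.
 *	_lwp_unpark(waiter_lid, lock);
 *
 * If the unpark arrives before the waiter has parked, LW_UNPARKED is
 * left set and the next park returns immediately (EALREADY), so the
 * wakeup cannot be lost.
 */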
int
sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

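/*
 * Set the name of the target LWP; names longer than MAXCOMLEN - 1
 * characters are silently truncated.  The previous name, if any, is
 * freed.
 */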
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}