/*	$NetBSD: sys_lwp.c,v 1.63 2018/01/30 07:52:23 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.63 2018/01/30 07:52:23 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

static syncobj_t lwp_park_sobj = {
        .sobj_flag	= SOBJ_SLEEPQ_LIFO,
        .sobj_unsleep	= sleepq_unsleep,
        .sobj_changepri	= sleepq_changepri,
        .sobj_lendpri	= sleepq_lendpri,
        .sobj_owner	= syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
        sleeptab_init(&lwp_park_tab);
}

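/*
 * Illustrative userland entry point for the code below (a sketch only;
 * the _lwp_makecontext() arguments are simplified and error handling
 * is omitted):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	_lwp_makecontext(&uc, start_routine, arg, NULL, stack, stacksize);
 *	if (_lwp_create(&uc, 0, &lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_create");
 *
 * The kernel side lands in sys__lwp_create(), which validates the
 * ucontext and hands off to do_lwp_create().
 */
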
int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp,
    const sigset_t *sigmask, const stack_t *sigstk)
{
        struct proc *p = l->l_proc;
        struct lwp *l2;
        struct schedstate_percpu *spc;
        vaddr_t uaddr;
        int error;

        /* XXX check against resource limits */

        uaddr = uvm_uarea_alloc();
        if (__predict_false(uaddr == 0))
                return ENOMEM;

        error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
            p->p_emul->e_startlwp, arg, &l2, l->l_class, sigmask, &SS_INIT);
        if (__predict_false(error)) {
                uvm_uarea_free(uaddr);
                return error;
        }

        *new_lwp = l2->l_lid;

        /*
         * Set the new LWP running, unless the caller has requested that
         * it be created in suspended state.  If the process is stopping,
         * then the LWP is created stopped.
         */
        mutex_enter(p->p_lock);
        lwp_lock(l2);
        spc = &l2->l_cpu->ci_schedstate;
        if ((flags & LWP_SUSPENDED) == 0 &&
            (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
                if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
                        KASSERT(l2->l_wchan == NULL);
                        l2->l_stat = LSSTOP;
                        p->p_nrlwps--;
                        lwp_unlock_to(l2, spc->spc_lwplock);
                } else {
                        KASSERT(lwp_locked(l2, spc->spc_mutex));
                        l2->l_stat = LSRUN;
                        sched_enqueue(l2, false);
                        lwp_unlock(l2);
                }
        } else {
                l2->l_stat = LSSUSPENDED;
                p->p_nrlwps--;
                lwp_unlock_to(l2, spc->spc_lwplock);
        }
        mutex_exit(p->p_lock);

        return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(const ucontext_t *) ucp;
                syscallarg(u_long) flags;
                syscallarg(lwpid_t *) new_lwp;
        } */
        struct proc *p = l->l_proc;
        ucontext_t *newuc;
        lwpid_t lid;
        int error;

        newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
        error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
        if (error)
                goto fail;

        /* validate the ucontext */
        if ((newuc->uc_flags & _UC_CPU) == 0) {
                error = EINVAL;
                goto fail;
        }
        error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
        if (error)
                goto fail;

        const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
            &newuc->uc_sigmask : &l->l_sigmask;
        error = do_lwp_create(l, newuc, SCARG(uap, flags), &lid, sigmask,
            &SS_INIT);
        if (error)
                goto fail;

        /*
         * Do not free the ucontext if the copyout fails: the new LWP
         * is already running and will access it.
         */
        return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));

fail:
        kmem_free(newuc, sizeof(ucontext_t));
        return error;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

        lwp_exit(l);
        return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

        *retval = l->l_lid;
        return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

        *retval = (uintptr_t)l->l_private;
        return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(void *) ptr;
        } */

        return lwp_setprivate(l, SCARG(uap, ptr));
}

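/*
 * A sketch of how the suspend/continue pair below is typically driven
 * from userland (illustrative only):
 *
 *	if (_lwp_suspend(lid) == 0) {
 *		...inspect or modify the stopped LWP...
 *		_lwp_continue(lid);
 *	}
 *
 * Suspending the last running LWP in the process would deadlock it,
 * which is why sys__lwp_suspend() returns EDEADLK in that case.
 */
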
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        struct proc *p = l->l_proc;
        struct lwp *t;
        int error;

        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        /*
         * Check for deadlock, which is only possible when we're suspending
         * ourselves.  XXX There is a short race here, as p_nrlwps is only
         * incremented when an LWP suspends itself on the kernel/user
         * boundary.  It's still possible to kill -9 the process, so we
         * don't bother checking further.
         */
        lwp_lock(t);
        if ((t == l && p->p_nrlwps == 1) ||
            (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
                lwp_unlock(t);
                mutex_exit(p->p_lock);
                return EDEADLK;
        }

        /*
         * Suspend the LWP.  XXX If it's on a different CPU, we should wait
         * for it to be preempted, where it will put itself to sleep.
         *
         * Suspension of the current LWP will happen on return to userspace.
         */
        error = lwp_suspend(l, t);
        if (error) {
                mutex_exit(p->p_lock);
                return error;
        }

        /*
         * Wait for:
         *  o process exiting
         *  o target LWP suspended
         *  o target LWP not suspended and LW_WSUSPEND clear
         *  o target LWP exited
         */
        for (;;) {
                error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
                if (error) {
                        error = ERESTART;
                        break;
                }
                if (lwp_find(p, SCARG(uap, target)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
                        error = ERESTART;
                        break;
                }
                if (t->l_stat == LSSUSPENDED ||
                    (t->l_flag & LW_WSUSPEND) == 0)
                        break;
        }
        mutex_exit(p->p_lock);

        return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        int error;
        struct proc *p = l->l_proc;
        struct lwp *t;

        error = 0;

        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        lwp_lock(t);
        lwp_continue(t);
        mutex_exit(p->p_lock);

        return error;
}

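/*
 * Descriptive note on the wakeup contract below (derived from the
 * code): the target must be blocked in an interruptible sleep (LSSLEEP
 * with LW_SINTR set), otherwise ENODEV or EBUSY results.  LW_CANCELLED
 * is set even when the wakeup fails, marking the target's sleep as one
 * that should not be transparently restarted.
 */
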
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        struct lwp *t;
        struct proc *p;
        int error;

        p = l->l_proc;
        mutex_enter(p->p_lock);

        if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        lwp_lock(t);
        t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

        if (t->l_stat != LSSLEEP) {
                lwp_unlock(t);
                error = ENODEV;
        } else if ((t->l_flag & LW_SINTR) == 0) {
                lwp_unlock(t);
                error = EBUSY;
        } else {
                /* Wake it up.  lwp_unsleep() will release the LWP lock. */
                lwp_unsleep(t, true);
                error = 0;
        }

        mutex_exit(p->p_lock);

        return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) wait_for;
                syscallarg(lwpid_t *) departed;
        } */
        struct proc *p = l->l_proc;
        int error;
        lwpid_t dep;

        mutex_enter(p->p_lock);
        error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
        mutex_exit(p->p_lock);

        if (!error && SCARG(uap, departed)) {
                error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
        }

        return error;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
                syscallarg(int) signo;
        } */
        struct proc *p = l->l_proc;
        struct lwp *t;
        ksiginfo_t ksi;
        int signo = SCARG(uap, signo);
        int error = 0;

        if ((u_int)signo >= NSIG)
                return EINVAL;

        KSI_INIT(&ksi);
        ksi.ksi_signo = signo;
        ksi.ksi_code = SI_LWP;
        ksi.ksi_pid = p->p_pid;
        ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
        ksi.ksi_lid = SCARG(uap, target);

        mutex_enter(proc_lock);
        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
                error = ESRCH;
        else if (signo != 0)
                kpsignal2(p, &ksi);
        mutex_exit(p->p_lock);
        mutex_exit(proc_lock);

        return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
        } */
        struct proc *p;
        struct lwp *t;
        lwpid_t target;
        int error;

        target = SCARG(uap, target);
        p = l->l_proc;

        mutex_enter(p->p_lock);

        if (l->l_lid == target)
                t = l;
        else {
                /*
                 * We can't use lwp_find() here because the target might
                 * be a zombie.
                 */
                LIST_FOREACH(t, &p->p_lwps, l_sibling)
                        if (t->l_lid == target)
                                break;
        }

        /*
         * If the LWP is already detached, there's nothing to do.
         * If it's a zombie, we need to clean up after it.  LSZOMB
         * is visible with the proc mutex held.
         *
         * After we have detached or released the LWP, kick any
         * other LWPs that may be sitting in _lwp_wait(), waiting
         * for the target LWP to exit.
         */
        if (t != NULL && t->l_stat != LSIDL) {
                if ((t->l_prflag & LPR_DETACHED) == 0) {
                        p->p_ndlwps++;
                        t->l_prflag |= LPR_DETACHED;
                        if (t->l_stat == LSZOMB) {
                                /* Releases proc mutex. */
                                lwp_free(t, false, false);
                                return 0;
                        }
                        error = 0;

                        /*
                         * Have any LWPs sleeping in lwp_wait() recheck
                         * for deadlock.
                         */
                        cv_broadcast(&p->p_lwpcv);
                } else
                        error = EINVAL;
        } else
                error = ESRCH;

        mutex_exit(p->p_lock);

        return error;
}

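/*
 * Note on the scheme below (descriptive): the wait channel for a park
 * operation is derived by XORing the process pointer with the
 * user-supplied hint, so equal hints in different processes hash to
 * different sleep queues.  Collisions are harmless because queue
 * entries are matched on (process, LWP id), not on the channel alone.
 */
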
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

        return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
        sleepq_t *sq;
        wchan_t wchan;
        kmutex_t *mp;
        proc_t *p;
        lwp_t *t;

        /*
         * Easy case: search for the LWP on the sleep queue.  If
         * it's parked, remove it from the queue and set running.
         */
        p = curproc;
        wchan = lwp_park_wchan(p, hint);
        sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

        TAILQ_FOREACH(t, sq, l_sleepchain)
                if (t->l_proc == p && t->l_lid == target)
                        break;

        if (__predict_true(t != NULL)) {
                sleepq_remove(sq, t);
                mutex_spin_exit(mp);
                return 0;
        }

        /*
         * The LWP hasn't parked yet.  Take the hit and mark the
         * operation as pending.
         */
        mutex_spin_exit(mp);

        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, target)) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }

        /*
         * It may not have parked yet, we may have raced, or it
         * is parked on a different user sync object.
         */
        lwp_lock(t);
        if (t->l_syncobj == &lwp_park_sobj) {
                /* Releases the LWP lock. */
                lwp_unsleep(t, true);
        } else {
                /*
                 * Set the operation pending.  The next call to _lwp_park
                 * will return early.
                 */
                t->l_flag |= LW_UNPARKED;
                lwp_unlock(t);
        }

        mutex_exit(p->p_lock);
        return 0;
}

int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
        sleepq_t *sq;
        kmutex_t *mp;
        wchan_t wchan;
        int timo, error;
        struct timespec start;
        lwp_t *l;
        bool timeremain = !(flags & TIMER_ABSTIME) && ts;

        if (ts != NULL) {
                if ((error = ts2timo(clock_id, flags, ts, &timo,
                    timeremain ? &start : NULL)) != 0)
                        return error;
                KASSERT(timo != 0);
        } else {
                timo = 0;
        }

        /* Find and lock the sleep queue. */
        l = curlwp;
        wchan = lwp_park_wchan(l->l_proc, hint);
        sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

        /*
         * Before going the full route and blocking, check to see if an
         * unpark op is pending.
         */
        lwp_lock(l);
        if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
                l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
                lwp_unlock(l);
                mutex_spin_exit(mp);
                return EALREADY;
        }
        lwp_unlock_to(l, mp);
        l->l_biglocks = 0;
        sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
        error = sleepq_block(timo, true);
        switch (error) {
        case EWOULDBLOCK:
                error = ETIMEDOUT;
                if (timeremain)
                        memset(ts, 0, sizeof(*ts));
                break;
        case ERESTART:
                error = EINTR;
                /*FALLTHROUGH*/
        default:
                if (timeremain)
                        clock_timeleft(clock_id, ts, &start);
                break;
        }
        return error;
}

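/*
 * Illustrative userland use of the park/unpark primitives (a sketch
 * only; the helper names are invented, and a real thread library adds
 * a waiter queue and handles spurious returns):
 *
 *	while (!condition_is_true(obj)) {
 *		record_self_as_waiter(obj, _lwp_self());
 *		(void)_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, obj, NULL);
 *	}
 *
 * and on the waking side:
 *
 *	make_condition_true(obj);
 *	(void)_lwp_unpark(waiter_lid, obj);
 *
 * Because LW_UNPARKED is left pending when the unpark arrives first,
 * the pair does not lose wakeups even without a lock around the calls.
 */
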
/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(clockid_t) clock_id;
                syscallarg(int) flags;
                syscallarg(struct timespec *) ts;
                syscallarg(lwpid_t) unpark;
                syscallarg(const void *) hint;
                syscallarg(const void *) unparkhint;
        } */
        struct timespec ts, *tsp;
        int error;

        if (SCARG(uap, ts) == NULL)
                tsp = NULL;
        else {
                error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
                if (error != 0)
                        return error;
                tsp = &ts;
        }

        if (SCARG(uap, unpark) != 0) {
                error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
                if (error != 0)
                        return error;
        }

        error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp,
            SCARG(uap, hint));
        if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
                (void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
        return error;
}

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
                syscallarg(const void *) hint;
        } */

        return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

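/*
 * Illustrative use of the batch interface below (a sketch; targets[]
 * and nwaiters are invented names).  A caller first asks how many LWPs
 * may be unparked per call by passing a NULL target array, then works
 * through its waiters in blocks of at most that size:
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *	for (size_t i = 0; i < nwaiters; i += max)
 *		_lwp_unpark_all(targets + i,
 *		    MIN(nwaiters - i, (size_t)max), obj);
 */
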
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(const lwpid_t *) targets;
                syscallarg(size_t) ntargets;
                syscallarg(const void *) hint;
        } */
        struct proc *p;
        struct lwp *t;
        sleepq_t *sq;
        wchan_t wchan;
        lwpid_t targets[32], *tp, *tpp, *tmax, target;
        int error;
        kmutex_t *mp;
        u_int ntargets;
        size_t sz;

        p = l->l_proc;
        ntargets = SCARG(uap, ntargets);

        if (SCARG(uap, targets) == NULL) {
                /*
                 * Let the caller know how much we are willing to do, and
                 * let it unpark the LWPs in blocks.
                 */
                *retval = LWP_UNPARK_MAX;
                return 0;
        }
        if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
                return EINVAL;

        /*
         * Copy in the target array.  If it's a small number of LWPs, then
         * place the numbers on the stack.
         */
        sz = sizeof(target) * ntargets;
        if (sz <= sizeof(targets))
                tp = targets;
        else
                tp = kmem_alloc(sz, KM_SLEEP);
        error = copyin(SCARG(uap, targets), tp, sz);
        if (error != 0) {
                if (tp != targets) {
                        kmem_free(tp, sz);
                }
                return error;
        }

        wchan = lwp_park_wchan(p, SCARG(uap, hint));
        sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

        for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
                target = *tpp;

                /*
                 * Easy case: search for the LWP on the sleep queue.  If
                 * it's parked, remove it from the queue and set running.
                 */
                TAILQ_FOREACH(t, sq, l_sleepchain)
                        if (t->l_proc == p && t->l_lid == target)
                                break;

                if (t != NULL) {
                        sleepq_remove(sq, t);
                        continue;
                }

                /*
                 * The LWP hasn't parked yet.  Take the hit and
                 * mark the operation as pending.
                 */
                mutex_spin_exit(mp);
                mutex_enter(p->p_lock);
                if ((t = lwp_find(p, target)) == NULL) {
                        mutex_exit(p->p_lock);
                        mutex_spin_enter(mp);
                        continue;
                }
                lwp_lock(t);

                /*
                 * It may not have parked yet, we may have raced, or
                 * it is parked on a different user sync object.
                 */
                if (t->l_syncobj == &lwp_park_sobj) {
                        /* Releases the LWP lock. */
                        lwp_unsleep(t, true);
                } else {
                        /*
                         * Set the operation pending.  The next call to
                         * _lwp_park will return early.
                         */
                        t->l_flag |= LW_UNPARKED;
                        lwp_unlock(t);
                }

                mutex_exit(p->p_lock);
                mutex_spin_enter(mp);
        }

        mutex_spin_exit(mp);
        if (tp != targets)
                kmem_free(tp, sz);

        return 0;
}

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
                syscallarg(const char *) name;
        } */
        char *name, *oname;
        lwpid_t target;
        proc_t *p;
        lwp_t *t;
        int error;

        if ((target = SCARG(uap, target)) == 0)
                target = l->l_lid;

        name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
        error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
        switch (error) {
        case ENAMETOOLONG:
        case 0:
                name[MAXCOMLEN - 1] = '\0';
                break;
        default:
                kmem_free(name, MAXCOMLEN);
                return error;
        }

        p = curproc;
        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, target)) == NULL) {
                mutex_exit(p->p_lock);
                kmem_free(name, MAXCOMLEN);
                return ESRCH;
        }
        lwp_lock(t);
        oname = t->l_name;
        t->l_name = name;
        lwp_unlock(t);
        mutex_exit(p->p_lock);

        if (oname != NULL)
                kmem_free(oname, MAXCOMLEN);

        return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(lwpid_t) target;
                syscallarg(char *) name;
                syscallarg(size_t) len;
        } */
        char name[MAXCOMLEN];
        lwpid_t target;
        proc_t *p;
        lwp_t *t;

        if ((target = SCARG(uap, target)) == 0)
                target = l->l_lid;

        p = curproc;
        mutex_enter(p->p_lock);
        if ((t = lwp_find(p, target)) == NULL) {
                mutex_exit(p->p_lock);
                return ESRCH;
        }
        lwp_lock(t);
        if (t->l_name == NULL)
                name[0] = '\0';
        else
                strlcpy(name, t->l_name, sizeof(name));
        lwp_unlock(t);
        mutex_exit(p->p_lock);

        return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) features;
                syscallarg(struct lwpctl **) address;
        } */
        int error, features;
        vaddr_t vaddr;

        features = SCARG(uap, features);
        features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
        if (features != 0)
                return ENODEV;
        if ((error = lwp_ctl_alloc(&vaddr)) != 0)
                return error;
        return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
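
/*
 * Illustrative use of the lwpctl interface above (a sketch only):
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0) {
 *		...lc->lc_curcpu now tracks the CPU this LWP runs on...
 *	}
 *
 * The kernel keeps the shared page up to date as the LWP migrates,
 * which is what makes cheap userland "which CPU am I on?" checks
 * possible.
 */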