/*	$NetBSD: kern_lwp.c,v 1.178 2014/09/05 05:57:21 matt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit or thread of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp", also known as lwp_t.
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed,
 *		or it has ceased executing a unit of work and is waiting
 *		to be started again.
 *
 *	LSSUSPENDED:
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or may be
 *	sleeping or idle.  It is expected to take the necessary action to
 *	stop executing or become "running" again within a short timeframe.
 *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
 *	Importantly, it indicates that its state is tied to a CPU.
 *
 *	LSZOMB:
 *
 *		Dead or dying: the LWP has released most of its resources
 *		and is about to switch away into oblivion, or has already
 *		switched away.  When it switches away, its few remaining
 *		resources can be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		has switched away or will switch away shortly to allow other
 *		LWPs to run on the CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *
 *		Single LWPs within a process cannot be set stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP state may only be set when returning to
 *	user space in userret(), or when sleeping interruptibly.  The
 *	LSSUSPENDED state may only be set in userret().  Before setting
 *	those states, we try to ensure that the LWPs will release all
 *	locks that they hold, and at a minimum try to ensure that the
 *	LWP can be set runnable again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *					            > SLEEP
 *					            > STOPPED
 *					            > SUSPENDED
 *					            > ZOMB
 *					            > IDL (special cases)
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *	            > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *	            > RUN		            > SUSPENDED
 *	            > STOPPED		            > STOPPED
 *				                    > ONPROC (special cases)
 *
 *	Some state transitions are only possible with kernel threads (e.g.
 *	ONPROC -> IDL) and happen under tightly controlled circumstances
 *	free of unwanted side effects.
 *
 * Migration
 *
 *	Migration of threads from one CPU to another could be performed
 *	internally by the scheduler via sched_takecpu() or sched_catchlwp()
 *	functions.  The universal lwp_migrate() function should be used for
 *	any other cases.  Subsystems in the kernel must be aware that the
 *	CPU of an LWP may change while it is not locked.
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin lock pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held,
 *	and may cause the LWP's lock pointer to change.  Manipulation of
 *	the general lock is not performed directly, but through calls to
 *	lwp_lock(), lwp_unlock() and others.  It should be noted that the
 *	adaptive locks are not allowed to be released while the LWP's lock
 *	is being held (unlike for other spin-locks).
 *
 *	States and their associated locks:
 *
 *	LSONPROC, LSZOMB:
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock and matches lwp::l_cpu.
 *
 *	LSIDL, LSRUN:
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This is a per-CPU lock and matches lwp::l_cpu.
 *
 *	LSSLEEP:
 *
 *		Covered by a lock associated with the sleep queue that the
 *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue lock.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the lock is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleeptab::st_mutex ->
 *			tschain_t::tc_mutex ->
 *			    spc::spc_mutex
 *
 *	Each process has a scheduler state lock (proc::p_lock), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
 *	so on.  When an LWP is to be entered into or removed from one of the
 *	following states, p_lock must be held and the process wide counters
 *	adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	(But not always for kernel threads.  There are some special cases
 *	as mentioned above.  See kern_softint.c.)
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_lock does not need to be held when transitioning among these
 *	three states, hence p_lock is rarely taken for state transitions.
 */
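/*
 * Illustrative example (editor's sketch, not part of the original source):
 * the canonical pattern implied by the locking rules above.  lwp_lock()
 * takes whichever mutex lwp::l_mutex currently points at, so the state can
 * be examined safely even though the identity of the lock changes along
 * with the state:
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSSLEEP) {
 *		... here l_mutex is the sleep queue lock ...
 *	}
 *	lwp_unlock(l);
 */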
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.178 2014/09/05 05:57:21 matt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_dtrace.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/pserialize.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/pset.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/dtrace_bsd.h>
#include <sys/sdt.h>
#include <sys/xcall.h>
#include <sys/uidinfo.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

static pool_cache_t	lwp_cache	__read_mostly;
struct lwplist		alllwp		__cacheline_aligned;

static void		lwp_dtor(void *, void *);

/* DTrace proc provider probes */
SDT_PROBE_DEFINE(proc,,,lwp_create, lwp-create,
    "struct lwp *", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,lwp_start, lwp-start,
    "struct lwp *", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,lwp_exit, lwp-exit,
    "struct lwp *", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL);

struct turnstile turnstile0;
struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
#ifdef LWP0_CPU_INFO
	.l_cpu = LWP0_CPU_INFO,
#endif
#ifdef LWP0_MD_INITIALIZER
	.l_md = LWP0_MD_INITIALIZER,
#endif
	.l_proc = &proc0,
	.l_lid = 1,
	.l_flag = LW_SYSTEM,
	.l_stat = LSONPROC,
	.l_ts = &turnstile0,
	.l_syncobj = &sched_syncobj,
	.l_refcnt = 1,
	.l_priority = PRI_USER + NPRI_USER - 1,
	.l_inheritedprio = -1,
	.l_class = SCHED_OTHER,
	.l_psid = PS_NONE,
	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
	.l_name = __UNCONST("swapper"),
	.l_fd = &filedesc0,
};

static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);

/*
 * sysctl helper routine for kern.maxlwp.  Ensures that the new
 * values are not too low or too high.
 */
static int
sysctl_kern_maxlwp(SYSCTLFN_ARGS)
{
	int error, nmaxlwp;
	struct sysctlnode node;

	nmaxlwp = maxlwp;
	node = *rnode;
	node.sysctl_data = &nmaxlwp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (nmaxlwp < 0 || nmaxlwp >= 65536)
		return EINVAL;
	if (nmaxlwp > cpu_maxlwp())
		return EINVAL;
	maxlwp = nmaxlwp;

	return 0;
}

static void
sysctl_kern_lwp_setup(void)
{
	struct sysctllog *clog = NULL;

	sysctl_createv(&clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlwp",
		       SYSCTL_DESCR("Maximum number of simultaneous threads"),
		       sysctl_kern_maxlwp, 0, NULL, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}

void
lwpinit(void)
{

	LIST_INIT(&alllwp);
	lwpinit_specificdata();
	lwp_sys_init();
	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
	    "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);

	maxlwp = cpu_maxlwp();
	sysctl_kern_lwp_setup();
}

void
lwp0_init(void)
{
	struct lwp *l = &lwp0;

	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
	KASSERT(l->l_lid == proc0.p_nlwpid);

	LIST_INSERT_HEAD(&alllwp, l, l_list);

	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
	cv_init(&l->l_sigcv, "sigwait");
	cv_init(&l->l_waitcv, "vfork");

	kauth_cred_hold(proc0.p_cred);
	l->l_cred = proc0.p_cred;

	kdtrace_thread_ctor(NULL, l);
	lwp_initspecific(l);

	SYSCALL_TIME_LWP_INIT(l);
}

static void
lwp_dtor(void *arg, void *obj)
{
	lwp_t *l = obj;
	uint64_t where;
	(void)l;

	/*
	 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
	 * calls will exit before memory of LWP is returned to the pool, where
	 * KVA of LWP structure might be freed and re-used for other purposes.
	 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
	 * callers, therefore cross-call to all CPUs will do the job.  Also,
	 * the value of l->l_cpu must be still valid at this point.
	 */
	KASSERT(l->l_cpu != NULL);
	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(where);
}

/*
 * Suspend an LWP.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(t->l_proc->p_lock));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend anyone
	 * else or deadlock could occur.  We won't return to userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
431 * setrunnable() will release the lock. 432 */ 433 if ((t->l_flag & LW_SINTR) != 0) 434 setrunnable(t); 435 else 436 lwp_unlock(t); 437 break; 438 439 case LSSUSPENDED: 440 lwp_unlock(t); 441 break; 442 443 case LSSTOP: 444 t->l_flag |= LW_WSUSPEND; 445 setrunnable(t); 446 break; 447 448 case LSIDL: 449 case LSZOMB: 450 error = EINTR; /* It's what Solaris does..... */ 451 lwp_unlock(t); 452 break; 453 } 454 455 return (error); 456 } 457 458 /* 459 * Restart a suspended LWP. 460 * 461 * Must be called with p_lock held, and the LWP locked. Will unlock the 462 * LWP before return. 463 */ 464 void 465 lwp_continue(struct lwp *l) 466 { 467 468 KASSERT(mutex_owned(l->l_proc->p_lock)); 469 KASSERT(lwp_locked(l, NULL)); 470 471 /* If rebooting or not suspended, then just bail out. */ 472 if ((l->l_flag & LW_WREBOOT) != 0) { 473 lwp_unlock(l); 474 return; 475 } 476 477 l->l_flag &= ~LW_WSUSPEND; 478 479 if (l->l_stat != LSSUSPENDED) { 480 lwp_unlock(l); 481 return; 482 } 483 484 /* setrunnable() will release the lock. */ 485 setrunnable(l); 486 } 487 488 /* 489 * Restart a stopped LWP. 490 * 491 * Must be called with p_lock held, and the LWP NOT locked. Will unlock the 492 * LWP before return. 493 */ 494 void 495 lwp_unstop(struct lwp *l) 496 { 497 struct proc *p = l->l_proc; 498 499 KASSERT(mutex_owned(proc_lock)); 500 KASSERT(mutex_owned(p->p_lock)); 501 502 lwp_lock(l); 503 504 /* If not stopped, then just bail out. */ 505 if (l->l_stat != LSSTOP) { 506 lwp_unlock(l); 507 return; 508 } 509 510 p->p_stat = SACTIVE; 511 p->p_sflag &= ~PS_STOPPING; 512 513 if (!p->p_waited) 514 p->p_pptr->p_nstopchild--; 515 516 if (l->l_wchan == NULL) { 517 /* setrunnable() will release the lock. */ 518 setrunnable(l); 519 } else if (p->p_xstat && (l->l_flag & LW_SINTR) != 0) { 520 /* setrunnable() so we can receive the signal */ 521 setrunnable(l); 522 } else { 523 l->l_stat = LSSLEEP; 524 p->p_nrlwps++; 525 lwp_unlock(l); 526 } 527 } 528 529 /* 530 * Wait for an LWP within the current process to exit. If 'lid' is 531 * non-zero, we are waiting for a specific LWP. 532 * 533 * Must be called with p->p_lock held. 534 */ 535 int 536 lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting) 537 { 538 const lwpid_t curlid = l->l_lid; 539 proc_t *p = l->l_proc; 540 lwp_t *l2; 541 int error; 542 543 KASSERT(mutex_owned(p->p_lock)); 544 545 p->p_nlwpwait++; 546 l->l_waitingfor = lid; 547 548 for (;;) { 549 int nfound; 550 551 /* 552 * Avoid a race between exit1() and sigexit(): if the 553 * process is dumping core, then we need to bail out: call 554 * into lwp_userret() where we will be suspended until the 555 * deed is done. 556 */ 557 if ((p->p_sflag & PS_WCORE) != 0) { 558 mutex_exit(p->p_lock); 559 lwp_userret(l); 560 KASSERT(false); 561 } 562 563 /* 564 * First off, drain any detached LWP that is waiting to be 565 * reaped. 566 */ 567 while ((l2 = p->p_zomblwp) != NULL) { 568 p->p_zomblwp = NULL; 569 lwp_free(l2, false, false);/* releases proc mutex */ 570 mutex_enter(p->p_lock); 571 } 572 573 /* 574 * Now look for an LWP to collect. If the whole process is 575 * exiting, count detached LWPs as eligible to be collected, 576 * but don't drain them here. 577 */ 578 nfound = 0; 579 error = 0; 580 LIST_FOREACH(l2, &p->p_lwps, l_sibling) { 581 /* 582 * If a specific wait and the target is waiting on 583 * us, then avoid deadlock. This also traps LWPs 584 * that try to wait on themselves. 585 * 586 * Note that this does not handle more complicated 587 * cycles, like: t1 -> t2 -> t3 -> t1. 
The process 588 * can still be killed so it is not a major problem. 589 */ 590 if (l2->l_lid == lid && l2->l_waitingfor == curlid) { 591 error = EDEADLK; 592 break; 593 } 594 if (l2 == l) 595 continue; 596 if ((l2->l_prflag & LPR_DETACHED) != 0) { 597 nfound += exiting; 598 continue; 599 } 600 if (lid != 0) { 601 if (l2->l_lid != lid) 602 continue; 603 /* 604 * Mark this LWP as the first waiter, if there 605 * is no other. 606 */ 607 if (l2->l_waiter == 0) 608 l2->l_waiter = curlid; 609 } else if (l2->l_waiter != 0) { 610 /* 611 * It already has a waiter - so don't 612 * collect it. If the waiter doesn't 613 * grab it we'll get another chance 614 * later. 615 */ 616 nfound++; 617 continue; 618 } 619 nfound++; 620 621 /* No need to lock the LWP in order to see LSZOMB. */ 622 if (l2->l_stat != LSZOMB) 623 continue; 624 625 /* 626 * We're no longer waiting. Reset the "first waiter" 627 * pointer on the target, in case it was us. 628 */ 629 l->l_waitingfor = 0; 630 l2->l_waiter = 0; 631 p->p_nlwpwait--; 632 if (departed) 633 *departed = l2->l_lid; 634 sched_lwp_collect(l2); 635 636 /* lwp_free() releases the proc lock. */ 637 lwp_free(l2, false, false); 638 mutex_enter(p->p_lock); 639 return 0; 640 } 641 642 if (error != 0) 643 break; 644 if (nfound == 0) { 645 error = ESRCH; 646 break; 647 } 648 649 /* 650 * Note: since the lock will be dropped, need to restart on 651 * wakeup to run all LWPs again, e.g. there may be new LWPs. 652 */ 653 if (exiting) { 654 KASSERT(p->p_nlwps > 1); 655 cv_wait(&p->p_lwpcv, p->p_lock); 656 error = EAGAIN; 657 break; 658 } 659 660 /* 661 * If all other LWPs are waiting for exits or suspends 662 * and the supply of zombies and potential zombies is 663 * exhausted, then we are about to deadlock. 664 * 665 * If the process is exiting (and this LWP is not the one 666 * that is coordinating the exit) then bail out now. 667 */ 668 if ((p->p_sflag & PS_WEXIT) != 0 || 669 p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) { 670 error = EDEADLK; 671 break; 672 } 673 674 /* 675 * Sit around and wait for something to happen. We'll be 676 * awoken if any of the conditions examined change: if an 677 * LWP exits, is collected, or is detached. 678 */ 679 if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0) 680 break; 681 } 682 683 /* 684 * We didn't find any LWPs to collect, we may have received a 685 * signal, or some other condition has caused us to bail out. 686 * 687 * If waiting on a specific LWP, clear the waiters marker: some 688 * other LWP may want it. Then, kick all the remaining waiters 689 * so that they can re-check for zombies and for deadlock. 690 */ 691 if (lid != 0) { 692 LIST_FOREACH(l2, &p->p_lwps, l_sibling) { 693 if (l2->l_lid == lid) { 694 if (l2->l_waiter == curlid) 695 l2->l_waiter = 0; 696 break; 697 } 698 } 699 } 700 p->p_nlwpwait--; 701 l->l_waitingfor = 0; 702 cv_broadcast(&p->p_lwpcv); 703 704 return error; 705 } 706 707 static lwpid_t 708 lwp_find_free_lid(lwpid_t try_lid, lwp_t * new_lwp, proc_t *p) 709 { 710 #define LID_SCAN (1u << 31) 711 lwp_t *scan, *free_before; 712 lwpid_t nxt_lid; 713 714 /* 715 * We want the first unused lid greater than or equal to 716 * try_lid (modulo 2^31). 717 * (If nothing else ld.elf_so doesn't want lwpid with the top bit set.) 718 * We must not return 0, and avoiding 'LID_SCAN - 1' makes 719 * the outer test easier. 720 * This would be much easier if the list were sorted in 721 * increasing order. 722 * The list is kept sorted in decreasing order. 
723 * This code is only used after a process has generated 2^31 lwp. 724 * 725 * Code assumes it can always find an id. 726 */ 727 728 try_lid &= LID_SCAN - 1; 729 if (try_lid <= 1) 730 try_lid = 2; 731 732 free_before = NULL; 733 nxt_lid = LID_SCAN - 1; 734 LIST_FOREACH(scan, &p->p_lwps, l_sibling) { 735 if (scan->l_lid != nxt_lid) { 736 /* There are available lid before this entry */ 737 free_before = scan; 738 if (try_lid > scan->l_lid) 739 break; 740 } 741 if (try_lid == scan->l_lid) { 742 /* The ideal lid is busy, take a higher one */ 743 if (free_before != NULL) { 744 try_lid = free_before->l_lid + 1; 745 break; 746 } 747 /* No higher ones, reuse low numbers */ 748 try_lid = 2; 749 } 750 751 nxt_lid = scan->l_lid - 1; 752 if (LIST_NEXT(scan, l_sibling) == NULL) { 753 /* The value we have is lower than any existing lwp */ 754 LIST_INSERT_AFTER(scan, new_lwp, l_sibling); 755 return try_lid; 756 } 757 } 758 759 LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling); 760 return try_lid; 761 } 762 763 /* 764 * Create a new LWP within process 'p2', using LWP 'l1' as a template. 765 * The new LWP is created in state LSIDL and must be set running, 766 * suspended, or stopped by the caller. 767 */ 768 int 769 lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags, 770 void *stack, size_t stacksize, void (*func)(void *), void *arg, 771 lwp_t **rnewlwpp, int sclass) 772 { 773 struct lwp *l2, *isfree; 774 turnstile_t *ts; 775 lwpid_t lid; 776 777 KASSERT(l1 == curlwp || l1->l_proc == &proc0); 778 779 /* 780 * Enforce limits, excluding the first lwp and kthreads. 781 */ 782 if (p2->p_nlwps != 0 && p2 != &proc0) { 783 uid_t uid = kauth_cred_getuid(l1->l_cred); 784 int count = chglwpcnt(uid, 1); 785 if (__predict_false(count > 786 p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) { 787 if (kauth_authorize_process(l1->l_cred, 788 KAUTH_PROCESS_RLIMIT, p2, 789 KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS), 790 &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR)) 791 != 0) { 792 (void)chglwpcnt(uid, -1); 793 return EAGAIN; 794 } 795 } 796 } 797 798 /* 799 * First off, reap any detached LWP waiting to be collected. 800 * We can re-use its LWP structure and turnstile. 801 */ 802 isfree = NULL; 803 if (p2->p_zomblwp != NULL) { 804 mutex_enter(p2->p_lock); 805 if ((isfree = p2->p_zomblwp) != NULL) { 806 p2->p_zomblwp = NULL; 807 lwp_free(isfree, true, false);/* releases proc mutex */ 808 } else 809 mutex_exit(p2->p_lock); 810 } 811 if (isfree == NULL) { 812 l2 = pool_cache_get(lwp_cache, PR_WAITOK); 813 memset(l2, 0, sizeof(*l2)); 814 l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK); 815 SLIST_INIT(&l2->l_pi_lenders); 816 } else { 817 l2 = isfree; 818 ts = l2->l_ts; 819 KASSERT(l2->l_inheritedprio == -1); 820 KASSERT(SLIST_EMPTY(&l2->l_pi_lenders)); 821 memset(l2, 0, sizeof(*l2)); 822 l2->l_ts = ts; 823 } 824 825 l2->l_stat = LSIDL; 826 l2->l_proc = p2; 827 l2->l_refcnt = 1; 828 l2->l_class = sclass; 829 830 /* 831 * If vfork(), we want the LWP to run fast and on the same CPU 832 * as its parent, so that it can reuse the VM context and cache 833 * footprint on the local CPU. 834 */ 835 l2->l_kpriority = ((flags & LWP_VFORK) ? true : false); 836 l2->l_kpribase = PRI_KERNEL; 837 l2->l_priority = l1->l_priority; 838 l2->l_inheritedprio = -1; 839 l2->l_flag = 0; 840 l2->l_pflag = LP_MPSAFE; 841 TAILQ_INIT(&l2->l_ld_locks); 842 843 /* 844 * For vfork, borrow parent's lwpctl context if it exists. 845 * This also causes us to return via lwp_userret. 
	 */
	if (flags & LWP_VFORK && l1->l_lwpctl) {
		l2->l_lwpctl = l1->l_lwpctl;
		l2->l_flag |= LW_LWPCTL;
	}

	/*
	 * If not the first LWP in the process, grab a reference to the
	 * descriptor table.
	 */
	l2->l_fd = p2->p_fd;
	if (p2->p_nlwps != 0) {
		KASSERT(l1->l_proc == p2);
		fd_hold(l2);
	} else {
		KASSERT(l1->l_proc != p2);
	}

	if (p2->p_flag & PK_SYSTEM) {
		/* Mark it as a system LWP. */
		l2->l_flag |= LW_SYSTEM;
	}

	kpreempt_disable();
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	kpreempt_enable();

	kdtrace_thread_ctor(NULL, l2);
	lwp_initspecific(l2);
	sched_lwp_fork(l1, l2);
	lwp_update_creds(l2);
	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
	cv_init(&l2->l_sigcv, "sigwait");
	cv_init(&l2->l_waitcv, "vfork");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	/*
	 * PCU state needs to be saved before calling uvm_lwp_fork() so that
	 * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
	 */
	pcu_save_all(l1);

	uvm_lwp_setuarea(l2, uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	if ((flags & LWP_PIDLID) != 0) {
		lid = proc_alloc_pid(p2);
		l2->l_pflag |= LP_PIDLID;
	} else {
		lid = 0;
	}

	mutex_enter(p2->p_lock);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigstk = l1->l_sigstk;
	l2->l_sigmask = l1->l_sigmask;
	TAILQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	if (__predict_true(lid == 0)) {
		/*
		 * XXX: l_lid values are expected to be unique (for a
		 * process), but if LWP_PIDLID is sometimes set this
		 * won't be true.  Once 2^31 threads have been allocated
		 * we have to scan to ensure we allocate a unique value.
		 */
		lid = ++p2->p_nlwpid;
		if (__predict_false(lid & LID_SCAN)) {
			lid = lwp_find_free_lid(lid, l2, p2);
			p2->p_nlwpid = lid | LID_SCAN;
			/* l2 has been inserted into p_lwps in order */
			goto skip_insert;
		}
		p2->p_nlwpid = lid;
	}
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
 skip_insert:
	l2->l_lid = lid;
	p2->p_nlwps++;
	p2->p_nrlwps++;

	KASSERT(l2->l_affinity == NULL);

	if ((p2->p_flag & PK_SYSTEM) == 0) {
		/* Inherit the affinity mask. */
		if (l1->l_affinity) {
			/*
			 * Note that we hold the state lock while inheriting
			 * the affinity to avoid race with sched_setaffinity().
			 */
			lwp_lock(l1);
			if (l1->l_affinity) {
				kcpuset_use(l1->l_affinity);
				l2->l_affinity = l1->l_affinity;
			}
			lwp_unlock(l1);
		}
		lwp_lock(l2);
		/* Inherit a processor-set */
		l2->l_psid = l1->l_psid;
		/* Look for a CPU to start */
		l2->l_cpu = sched_takecpu(l2);
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
	}
	mutex_exit(p2->p_lock);

	SDT_PROBE(proc,,,lwp_create, l2, 0,0,0,0);

	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(proc_lock);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}
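/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a typical creation sequence for a detached kernel LWP, loosely modeled
 * on kthread_create().  "example_main" and "example_spawn" are placeholder
 * names; priority setup and the exact uarea allocation are schematic.
 */
#if 0
static void
example_main(void *arg)
{

	/* The new LWP begins execution here, via lwp_startup(). */
	lwp_exit(curlwp);
}

static int
example_spawn(void)
{
	vaddr_t uaddr;
	lwp_t *l;
	int error;

	uaddr = uvm_uarea_alloc();		/* stack + u-area */
	if (uaddr == 0)
		return ENOMEM;

	error = lwp_create(curlwp, &proc0, uaddr, LWP_DETACHED, NULL, 0,
	    example_main, NULL, &l, SCHED_OTHER);
	if (error != 0) {
		uvm_uarea_free(uaddr);
		return error;
	}

	/* The LWP was created in LSIDL: set it running ourselves. */
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
	mutex_exit(proc0.p_lock);
	return 0;
}
#endif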

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new_lwp)
{
	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);

	SDT_PROBE(proc,,,lwp_start, new_lwp, 0,0,0,0);

	KASSERT(kpreempt_disabled());
	if (prev != NULL) {
		/*
		 * Normalize the count of the spin-mutexes, it was
		 * increased in mi_switch().  Unmark the state of
		 * context switch - it is finished for previous LWP.
		 */
		curcpu()->ci_mtx_count++;
		membar_exit();
		prev->l_ctxswtch = 0;
	}
	KPREEMPT_DISABLE(new_lwp);
	spl0();
	if (__predict_true(new_lwp->l_proc->p_vmspace))
		pmap_activate(new_lwp);

	/* Note trip through cpu_switchto(). */
	pserialize_switchpoint();

	LOCKDEBUG_BARRIER(NULL, 0);
	KPREEMPT_ENABLE(new_lwp);
	if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new_lwp);
	}
}

/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	bool current;

	current = (l == curlwp);

	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
	KASSERT(p == curproc);

	SDT_PROBE(proc,,,lwp_exit, l, 0,0,0,0);

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
	LOCKDEBUG_BARRIER(&kernel_lock, 0);

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(p->p_lock);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		KASSERT(current == true);
		KASSERT(p != &proc0);
		/* XXXSMP kernel_lock not held */
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(p->p_lock);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Drop filedesc reference. */
	fd_free();

	/* Delete the specificdata while it's still safe to sleep. */
	lwp_finispecific(l);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);
	callout_destroy(&l->l_timeout_ch);

	/*
	 * Remove the LWP from the global list.
	 * Free its LID from the PID namespace if needed.
	 */
	mutex_enter(proc_lock);
	LIST_REMOVE(l, l_list);
	if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
		proc_free_pid(l->l_lid);
	}
	mutex_exit(proc_lock);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 */
	mutex_enter(p->p_lock);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);/* releases proc mutex */
			mutex_enter(p->p_lock);
			l->l_refcnt++;
			lwp_drainrefs(l);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	/*
	 * Release any PCU resources before becoming a zombie.
	 */
	pcu_discard_all(l);

	lwp_lock(l);
	l->l_stat = LSZOMB;
	if (l->l_name != NULL) {
		strcpy(l->l_name, "(zombie)");
	}
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
	mutex_exit(p->p_lock);

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
	cpu_lwp_free(l, 0);

	if (current) {
		pmap_deactivate(l);

		/*
		 * Release the kernel lock, and switch away into
		 * oblivion.
		 */
#ifdef notyet
		/* XXXSMP hold in lwp_userret() */
		KERNEL_UNLOCK_LAST(l);
#else
		KERNEL_UNLOCK_ALL(l, NULL);
#endif
		lwp_exit_switchaway(l);
	}
}
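/*
 * Illustrative example (editor's sketch, not part of the original source):
 * lwp_exit() is the backend of the _lwp_exit(2) system call.  The entry
 * point, modeled loosely on sys__lwp_exit() in kern/sys_lwp.c, amounts to:
 */
#if 0
int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);		/* does not return for the calling LWP */
	return 0;
}
#endif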

/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
	struct proc *p = l->l_proc;
	struct rusage *ru;
	ksiginfoq_t kq;

	KASSERT(l != curlwp);
	KASSERT(last || mutex_owned(p->p_lock));

	/*
	 * We use the process credentials instead of the lwp credentials here
	 * because the lwp credentials may be cached (just after a setuid
	 * call) and we don't want to pay for syncing, since the lwp is going
	 * away anyway.
	 */
	if (p != &proc0 && p->p_nlwps != 1)
		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		bintime_add(&p->p_rtime, &l->l_rtime);
		p->p_pctcpu += l->l_pctcpu;
		ru = &p->p_stats->p_ru;
		ruadd(ru, &l->l_ru);
		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
		ru->ru_nivcsw += l->l_nivcsw;
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;

		/*
		 * Have any LWPs sleeping in lwp_wait() recheck for
		 * deadlock.
		 */
		cv_broadcast(&p->p_lwpcv);
		mutex_exit(p->p_lock);
	}

#ifdef MULTIPROCESSOR
	/*
	 * In the unlikely event that the LWP is still on the CPU,
	 * then spin until it has switched away.  We need to release
	 * all locks to avoid deadlock against interrupt handlers on
	 * the target CPU.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
		int count;
		(void)count; /* XXXgcc */
		KERNEL_UNLOCK_ALL(curlwp, &count);
		while ((l->l_pflag & LP_RUNNING) != 0 ||
		    l->l_cpu->ci_curlwp == l)
			SPINLOCK_BACKOFF_HOOK;
		KERNEL_LOCK(count, curlwp);
	}
#endif

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);
	cv_destroy(&l->l_waitcv);

	/*
	 * Free lwpctl structure and affinity.
	 */
	if (l->l_lwpctl) {
		lwp_ctl_free(l);
	}
	if (l->l_affinity) {
		kcpuset_unuse(l->l_affinity, NULL);
		l->l_affinity = NULL;
	}

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.  Also, free the scheduler specific
	 * data.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */

	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(turnstile_cache, l->l_ts);
	if (l->l_name != NULL)
		kmem_free(l->l_name, MAXCOMLEN);

	cpu_lwp_free2(l);
	uvm_lwp_exit(l);

	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == -1);
	KASSERT(l->l_blcnt == 0);
	kdtrace_thread_dtor(NULL, l);
	if (!recycle)
		pool_cache_put(lwp_cache, l);
}

/*
 * Migrate the LWP to another CPU.  Unlocks the LWP.
 */
void
lwp_migrate(lwp_t *l, struct cpu_info *tci)
{
	struct schedstate_percpu *tspc;
	int lstat = l->l_stat;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(tci != NULL);

	/* If LWP is still on the CPU, it must be handled like LSONPROC */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		lstat = LSONPROC;
	}

	/*
	 * The destination CPU could be changed while previous migration
	 * was not finished.
	 */
	if (l->l_target_cpu != NULL) {
		l->l_target_cpu = tci;
		lwp_unlock(l);
		return;
	}

	/* Nothing to do if trying to migrate to the same CPU */
	if (l->l_cpu == tci) {
		lwp_unlock(l);
		return;
	}

	KASSERT(l->l_target_cpu == NULL);
	tspc = &tci->ci_schedstate;
	switch (lstat) {
	case LSRUN:
		l->l_target_cpu = tci;
		break;
	case LSIDL:
		l->l_cpu = tci;
		lwp_unlock_to(l, tspc->spc_mutex);
		return;
	case LSSLEEP:
		l->l_cpu = tci;
		break;
	case LSSTOP:
	case LSSUSPENDED:
		l->l_cpu = tci;
		if (l->l_wchan == NULL) {
			lwp_unlock_to(l, tspc->spc_lwplock);
			return;
		}
		break;
	case LSONPROC:
		l->l_target_cpu = tci;
		spc_lock(l->l_cpu);
		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
		spc_unlock(l->l_cpu);
		break;
	}
	lwp_unlock(l);
}
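/*
 * Illustrative example (editor's sketch, not part of the original source):
 * callers of lwp_migrate() lock the LWP first and rely on lwp_migrate() to
 * drop the lock, whichever path it takes ("new_ci" is a placeholder):
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, new_ci);		(returns with l unlocked)
 */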

/*
 * Find the LWP in the process.  Arguments may be zero, in which case
 * the calling process and the first LWP in the list are used.
 * On success - returns proc locked.
 */
struct lwp *
lwp_find2(pid_t pid, lwpid_t lid)
{
	proc_t *p;
	lwp_t *l;

	/* Find the process. */
	if (pid != 0) {
		mutex_enter(proc_lock);
		p = proc_find(pid);
		if (p == NULL) {
			mutex_exit(proc_lock);
			return NULL;
		}
		mutex_enter(p->p_lock);
		mutex_exit(proc_lock);
	} else {
		p = curlwp->l_proc;
		mutex_enter(p->p_lock);
	}
	/* Find the thread. */
	if (lid != 0) {
		l = lwp_find(p, lid);
	} else {
		l = LIST_FIRST(&p->p_lwps);
	}
	if (l == NULL) {
		mutex_exit(p->p_lock);
	}
	return l;
}

/*
 * Look up a live LWP within the specified process.
 *
 * Must be called with p->p_lock held.
 */
struct lwp *
lwp_find(struct proc *p, lwpid_t id)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(p->p_lock);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	l->l_prflag &= ~LPR_CRMOD;
	mutex_exit(p->p_lock);
	if (oc != NULL)
		kauth_cred_free(oc);
}

/*
 * Verify that an LWP is locked, and optionally verify that the lock matches
 * one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *mtx)
{

	KASSERT(mutex_owned(l->l_mutex));

	membar_exit();
	l->l_mutex = mtx;
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *old;

	KASSERT(lwp_locked(l, NULL));

	old = l->l_mutex;
	membar_exit();
	l->l_mutex = mtx;
	mutex_spin_exit(old);
}
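/*
 * Illustrative example (editor's sketch, not part of the original source):
 * "lending" a lock with lwp_unlock_to().  Code that moves an LWP onto a
 * queue protected by some mutex "mtx" can change the LWP's lock identity
 * without the LWP ever being observable unlocked:
 *
 *	lwp_lock(l);			(takes the current l_mutex)
 *	... place l on the queue protected by mtx ...
 *	lwp_unlock_to(l, mtx);		(l_mutex = mtx, old lock released)
 *
 * A concurrent lwp_trylock() that raced with the change notices that
 * l_mutex no longer matches the mutex it acquired, drops it and retries.
 */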
int
lwp_trylock(struct lwp *l)
{
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
}

void
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));
	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
}

/*
 * Handle exceptions for mi_userret().  Called if a member of LW_USERRET is
 * set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	int sig;

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	p = l->l_proc;

#ifndef __HAVE_FAST_SOFTINTS
	/* Run pending soft interrupts. */
	if (l->l_cpu->ci_data.cpu_softints != 0)
		softint_overlay();
#endif

	/*
	 * It is safe to do this read unlocked on an MP system.
	 */
	while ((l->l_flag & LW_USERRET) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core or exiting, where we will instead
		 * enter the LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
		    LW_PENDSIG) {
			mutex_enter(p->p_lock);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(p->p_lock);
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the kernel
		 * stack and therefore the userland registers saved in the
		 * trapframe are around for coredump() to write them out.
		 * We also need to save any PCU resources that we have so that
		 * they are accessible for coredump().  We issue a wakeup on
		 * p->p_lwpcv so that sigexit() will write the core file out
		 * once all other LWPs are suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			pcu_save_all(l);
			mutex_enter(p->p_lock);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			lwp_unlock(l);
			mutex_exit(p->p_lock);
			lwp_lock(l);
			mi_switch(l);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* update lwpctl processor (for vfork child_return) */
		if (l->l_flag & LW_LWPCTL) {
			lwp_lock(l);
			KASSERT(kpreempt_disabled());
			l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
			l->l_lwpctl->lc_pctr++;
			l->l_flag &= ~LW_LWPCTL;
			lwp_unlock(l);
		}
	}
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	membar_producer();
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

	KASSERT(mutex_owned(l->l_proc->p_lock));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(p->p_lock);
	lwp_delref2(l);
	mutex_exit(p->p_lock);
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.  The proc mutex is held
 * on entry.
 */
void
lwp_delref2(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt > 0);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_lwpcv);
}

/*
 * Drain all references to the current LWP.
1642 */ 1643 void 1644 lwp_drainrefs(struct lwp *l) 1645 { 1646 struct proc *p = l->l_proc; 1647 1648 KASSERT(mutex_owned(p->p_lock)); 1649 KASSERT(l->l_refcnt != 0); 1650 1651 l->l_refcnt--; 1652 while (l->l_refcnt != 0) 1653 cv_wait(&p->p_lwpcv, p->p_lock); 1654 } 1655 1656 /* 1657 * Return true if the specified LWP is 'alive'. Only p->p_lock need 1658 * be held. 1659 */ 1660 bool 1661 lwp_alive(lwp_t *l) 1662 { 1663 1664 KASSERT(mutex_owned(l->l_proc->p_lock)); 1665 1666 switch (l->l_stat) { 1667 case LSSLEEP: 1668 case LSRUN: 1669 case LSONPROC: 1670 case LSSTOP: 1671 case LSSUSPENDED: 1672 return true; 1673 default: 1674 return false; 1675 } 1676 } 1677 1678 /* 1679 * Return first live LWP in the process. 1680 */ 1681 lwp_t * 1682 lwp_find_first(proc_t *p) 1683 { 1684 lwp_t *l; 1685 1686 KASSERT(mutex_owned(p->p_lock)); 1687 1688 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 1689 if (lwp_alive(l)) { 1690 return l; 1691 } 1692 } 1693 1694 return NULL; 1695 } 1696 1697 /* 1698 * Allocate a new lwpctl structure for a user LWP. 1699 */ 1700 int 1701 lwp_ctl_alloc(vaddr_t *uaddr) 1702 { 1703 lcproc_t *lp; 1704 u_int bit, i, offset; 1705 struct uvm_object *uao; 1706 int error; 1707 lcpage_t *lcp; 1708 proc_t *p; 1709 lwp_t *l; 1710 1711 l = curlwp; 1712 p = l->l_proc; 1713 1714 /* don't allow a vforked process to create lwp ctls */ 1715 if (p->p_lflag & PL_PPWAIT) 1716 return EBUSY; 1717 1718 if (l->l_lcpage != NULL) { 1719 lcp = l->l_lcpage; 1720 *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr; 1721 return 0; 1722 } 1723 1724 /* First time around, allocate header structure for the process. */ 1725 if ((lp = p->p_lwpctl) == NULL) { 1726 lp = kmem_alloc(sizeof(*lp), KM_SLEEP); 1727 mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE); 1728 lp->lp_uao = NULL; 1729 TAILQ_INIT(&lp->lp_pages); 1730 mutex_enter(p->p_lock); 1731 if (p->p_lwpctl == NULL) { 1732 p->p_lwpctl = lp; 1733 mutex_exit(p->p_lock); 1734 } else { 1735 mutex_exit(p->p_lock); 1736 mutex_destroy(&lp->lp_lock); 1737 kmem_free(lp, sizeof(*lp)); 1738 lp = p->p_lwpctl; 1739 } 1740 } 1741 1742 /* 1743 * Set up an anonymous memory region to hold the shared pages. 1744 * Map them into the process' address space. The user vmspace 1745 * gets the first reference on the UAO. 1746 */ 1747 mutex_enter(&lp->lp_lock); 1748 if (lp->lp_uao == NULL) { 1749 lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0); 1750 lp->lp_cur = 0; 1751 lp->lp_max = LWPCTL_UAREA_SZ; 1752 lp->lp_uva = p->p_emul->e_vm_default_addr(p, 1753 (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ); 1754 error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva, 1755 LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, 1756 UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0)); 1757 if (error != 0) { 1758 uao_detach(lp->lp_uao); 1759 lp->lp_uao = NULL; 1760 mutex_exit(&lp->lp_lock); 1761 return error; 1762 } 1763 } 1764 1765 /* Get a free block and allocate for this LWP. */ 1766 TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) { 1767 if (lcp->lcp_nfree != 0) 1768 break; 1769 } 1770 if (lcp == NULL) { 1771 /* Nothing available - try to set up a free page. */ 1772 if (lp->lp_cur == lp->lp_max) { 1773 mutex_exit(&lp->lp_lock); 1774 return ENOMEM; 1775 } 1776 lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP); 1777 if (lcp == NULL) { 1778 mutex_exit(&lp->lp_lock); 1779 return ENOMEM; 1780 } 1781 /* 1782 * Wire the next page down in kernel space. Since this 1783 * is a new mapping, we must add a reference. 
1784 */ 1785 uao = lp->lp_uao; 1786 (*uao->pgops->pgo_reference)(uao); 1787 lcp->lcp_kaddr = vm_map_min(kernel_map); 1788 error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE, 1789 uao, lp->lp_cur, PAGE_SIZE, 1790 UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, 1791 UVM_INH_NONE, UVM_ADV_RANDOM, 0)); 1792 if (error != 0) { 1793 mutex_exit(&lp->lp_lock); 1794 kmem_free(lcp, LWPCTL_LCPAGE_SZ); 1795 (*uao->pgops->pgo_detach)(uao); 1796 return error; 1797 } 1798 error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr, 1799 lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0); 1800 if (error != 0) { 1801 mutex_exit(&lp->lp_lock); 1802 uvm_unmap(kernel_map, lcp->lcp_kaddr, 1803 lcp->lcp_kaddr + PAGE_SIZE); 1804 kmem_free(lcp, LWPCTL_LCPAGE_SZ); 1805 return error; 1806 } 1807 /* Prepare the page descriptor and link into the list. */ 1808 lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur; 1809 lp->lp_cur += PAGE_SIZE; 1810 lcp->lcp_nfree = LWPCTL_PER_PAGE; 1811 lcp->lcp_rotor = 0; 1812 memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ); 1813 TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain); 1814 } 1815 for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) { 1816 if (++i >= LWPCTL_BITMAP_ENTRIES) 1817 i = 0; 1818 } 1819 bit = ffs(lcp->lcp_bitmap[i]) - 1; 1820 lcp->lcp_bitmap[i] ^= (1 << bit); 1821 lcp->lcp_rotor = i; 1822 lcp->lcp_nfree--; 1823 l->l_lcpage = lcp; 1824 offset = (i << 5) + bit; 1825 l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset; 1826 *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t); 1827 mutex_exit(&lp->lp_lock); 1828 1829 KPREEMPT_DISABLE(l); 1830 l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index; 1831 KPREEMPT_ENABLE(l); 1832 1833 return 0; 1834 } 1835 1836 /* 1837 * Free an lwpctl structure back to the per-process list. 1838 */ 1839 void 1840 lwp_ctl_free(lwp_t *l) 1841 { 1842 struct proc *p = l->l_proc; 1843 lcproc_t *lp; 1844 lcpage_t *lcp; 1845 u_int map, offset; 1846 1847 /* don't free a lwp context we borrowed for vfork */ 1848 if (p->p_lflag & PL_PPWAIT) { 1849 l->l_lwpctl = NULL; 1850 return; 1851 } 1852 1853 lp = p->p_lwpctl; 1854 KASSERT(lp != NULL); 1855 1856 lcp = l->l_lcpage; 1857 offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr); 1858 KASSERT(offset < LWPCTL_PER_PAGE); 1859 1860 mutex_enter(&lp->lp_lock); 1861 lcp->lcp_nfree++; 1862 map = offset >> 5; 1863 lcp->lcp_bitmap[map] |= (1 << (offset & 31)); 1864 if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0) 1865 lcp->lcp_rotor = map; 1866 if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) { 1867 TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain); 1868 TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain); 1869 } 1870 mutex_exit(&lp->lp_lock); 1871 } 1872 1873 /* 1874 * Process is exiting; tear down lwpctl state. This can only be safely 1875 * called by the last LWP in the process. 
1876 */ 1877 void 1878 lwp_ctl_exit(void) 1879 { 1880 lcpage_t *lcp, *next; 1881 lcproc_t *lp; 1882 proc_t *p; 1883 lwp_t *l; 1884 1885 l = curlwp; 1886 l->l_lwpctl = NULL; 1887 l->l_lcpage = NULL; 1888 p = l->l_proc; 1889 lp = p->p_lwpctl; 1890 1891 KASSERT(lp != NULL); 1892 KASSERT(p->p_nlwps == 1); 1893 1894 for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) { 1895 next = TAILQ_NEXT(lcp, lcp_chain); 1896 uvm_unmap(kernel_map, lcp->lcp_kaddr, 1897 lcp->lcp_kaddr + PAGE_SIZE); 1898 kmem_free(lcp, LWPCTL_LCPAGE_SZ); 1899 } 1900 1901 if (lp->lp_uao != NULL) { 1902 uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva, 1903 lp->lp_uva + LWPCTL_UAREA_SZ); 1904 } 1905 1906 mutex_destroy(&lp->lp_lock); 1907 kmem_free(lp, sizeof(*lp)); 1908 p->p_lwpctl = NULL; 1909 } 1910 1911 /* 1912 * Return the current LWP's "preemption counter". Used to detect 1913 * preemption across operations that can tolerate preemption without 1914 * crashing, but which may generate incorrect results if preempted. 1915 */ 1916 uint64_t 1917 lwp_pctr(void) 1918 { 1919 1920 return curlwp->l_ncsw; 1921 } 1922 1923 /* 1924 * Set an LWP's private data pointer. 1925 */ 1926 int 1927 lwp_setprivate(struct lwp *l, void *ptr) 1928 { 1929 int error = 0; 1930 1931 l->l_private = ptr; 1932 #ifdef __HAVE_CPU_LWP_SETPRIVATE 1933 error = cpu_lwp_setprivate(l, ptr); 1934 #endif 1935 return error; 1936 } 1937 1938 #if defined(DDB) 1939 #include <machine/pcb.h> 1940 1941 void 1942 lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...)) 1943 { 1944 lwp_t *l; 1945 1946 LIST_FOREACH(l, &alllwp, l_list) { 1947 uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l); 1948 1949 if (addr < stack || stack + KSTACK_SIZE <= addr) { 1950 continue; 1951 } 1952 (*pr)("%p is %p+%zu, LWP %p's stack\n", 1953 (void *)addr, (void *)stack, 1954 (size_t)(addr - stack), l); 1955 } 1956 } 1957 #endif /* defined(DDB) */ 1958
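/*
 * Illustrative example (editor's sketch, not part of the original source):
 * using lwp_pctr() to detect preemption around a computation that reads
 * curcpu()-local state, per the comment above lwp_pctr().  If the counter
 * changed, the LWP was context-switched at least once during the sequence
 * and the result may be stale ("example_retry" is a placeholder name):
 */
#if 0
static void
example_retry(void)
{
	uint64_t pctr;

	do {
		pctr = lwp_pctr();
		/* read or compute something from curcpu()-local state */
	} while (pctr != lwp_pctr());
}
#endif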