/*	$NetBSD: kern_lwp.c,v 1.149 2010/06/13 04:13:31 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	Lightweight processes (LWPs) are the basic unit or thread of
 *	execution within the kernel.  The core state of an LWP is described
 *	by "struct lwp", also known as lwp_t.
 *
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
 *
 * Execution states
 *
 *	At any given time, an LWP has overall state that is described by
 *	lwp::l_stat.  The states are broken into two sets below.  The first
 *	set is guaranteed to represent the absolute, current state of the
 *	LWP:
 *
 *	LSONPROC
 *
 *		On processor: the LWP is executing on a CPU, either in the
 *		kernel or in user space.
 *
 *	LSRUN
 *
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.
 *
 *	LSIDL
 *
 *		Idle: the LWP has been created but has not yet executed,
 *		or it has ceased executing a unit of work and is waiting
 *		to be started again.
 *
 *	LSSUSPENDED
 *
 *		Suspended: the LWP has had its execution suspended by
 *		another LWP in the same process using the _lwp_suspend()
 *		system call.  User-level LWPs also enter the suspended
 *		state when the system is shutting down.
 *
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or it may
 *	be sleeping or idle.
 *	It is expected to take the necessary action to stop executing or
 *	become "running" again within a short timeframe.  The LP_RUNNING
 *	flag in lwp::l_pflag indicates that an LWP is running.
 *	Importantly, it indicates that its state is tied to a CPU.
 *
 *	LSZOMB:
 *
 *		Dead or dying: the LWP has released most of its resources
 *		and is about to switch away into oblivion, or has already
 *		switched away.  When it switches away, its few remaining
 *		resources can be collected.
 *
 *	LSSLEEP:
 *
 *		Sleeping: the LWP has entered itself onto a sleep queue, and
 *		has switched away or will switch away shortly to allow other
 *		LWPs to run on the CPU.
 *
 *	LSSTOP:
 *
 *		Stopped: the LWP has been stopped as a result of a job
 *		control signal, or as a result of the ptrace() interface.
 *
 *		Stopped LWPs may run briefly within the kernel to handle
 *		signals that they receive, but will not return to user space
 *		until their process' state is changed away from stopped.
 *
 *		Individual LWPs within a process cannot be stopped
 *		selectively: all actions that can stop or continue LWPs
 *		occur at the process level.
 *
 * State transitions
 *
 *	Note that the LSSTOP state may only be set when returning to
 *	user space in userret(), or when sleeping interruptibly.  The
 *	LSSUSPENDED state may only be set in userret().  Before setting
 *	those states, we try to ensure that the LWPs will release all
 *	locks that they hold, and at a minimum try to ensure that the
 *	LWP can be set runnable again by a signal.
 *
 *	LWPs may transition states in the following ways:
 *
 *	 RUN -------> ONPROC		ONPROC -----> RUN
 *					            > SLEEP
 *					            > STOPPED
 *					            > SUSPENDED
 *					            > ZOMB
 *					            > IDL (special cases)
 *
 *	 STOPPED ---> RUN		SUSPENDED --> RUN
 *	            > SLEEP
 *
 *	 SLEEP -----> ONPROC		IDL --------> RUN
 *	            > RUN		            > SUSPENDED
 *	            > STOPPED		            > STOPPED
 *					            > ONPROC (special cases)
 *
 *	Some state transitions are only possible with kernel threads
 *	(e.g. ONPROC -> IDL) and happen under tightly controlled
 *	circumstances free of unwanted side effects.
 *
 * Migration
 *
 *	Migration of threads from one CPU to another may be performed
 *	internally by the scheduler via the sched_takecpu() or
 *	sched_catchlwp() functions.  The universal lwp_migrate() function
 *	should be used for any other cases.  Subsystems in the kernel
 *	must be aware that the CPU of an LWP may change while the LWP
 *	is not locked.
 *
 * Locking
 *
 *	The majority of fields in 'struct lwp' are covered by a single,
 *	general spin lock pointed to by lwp::l_mutex.  The locks covering
 *	each field are documented in sys/lwp.h.
 *
 *	State transitions must be made with the LWP's general lock held,
 *	and may cause the LWP's lock pointer to change.  Manipulation of
 *	the general lock is not performed directly, but through calls to
 *	lwp_lock(), lwp_relock() and similar.
 *
 *	States and their associated locks:
 *
 *	LSONPROC, LSZOMB:
 *
 *		Always covered by spc_lwplock, which protects running LWPs.
 *		This is a per-CPU lock and matches lwp::l_cpu.
 *
 *	LSIDL, LSRUN:
 *
 *		Always covered by spc_mutex, which protects the run queues.
 *		This is a per-CPU lock and matches lwp::l_cpu.
 *
 *	LSSLEEP:
 *
 *		Covered by a lock associated with the sleep queue that the
 *		LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
 *
 *	LSSTOP, LSSUSPENDED:
 *
 *		If the LWP was previously sleeping (l_wchan != NULL), then
 *		l_mutex references the sleep queue lock.  If the LWP was
 *		runnable or on the CPU when halted, or has been removed from
 *		the sleep queue since halted, then the lock is spc_lwplock.
 *
 *	The lock order is as follows:
 *
 *		spc::spc_lwplock ->
 *		    sleeptab::st_mutex ->
 *		        tschain_t::tc_mutex ->
 *		            spc::spc_mutex
 *
 *	Each process has a scheduler state lock (proc::p_lock), and a
 *	number of counters on LWPs and their states: p_nzlwps, p_nrlwps,
 *	and so on.  When an LWP is to be entered into or removed from one
 *	of the following states, p_lock must be held and the process wide
 *	counters adjusted:
 *
 *		LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
 *
 *	(But not always for kernel threads.  There are some special cases
 *	as mentioned above.  See kern_softint.c.)
 *
 *	Note that an LWP is considered running or likely to run soon if in
 *	one of the following states.  This affects the value of p_nrlwps:
 *
 *		LSRUN, LSONPROC, LSSLEEP
 *
 *	p_lock does not need to be held when transitioning among these
 *	three states, hence p_lock is rarely taken for state transitions.
 */
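
/*
 * Illustrative sketch: the canonical pattern for examining volatile
 * per-LWP state from another subsystem, following the locking rules
 * above.  lwp_lock() resolves the current l_mutex, whose identity may
 * change as the LWP changes state:
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSRUN) {
 *		// ... the LWP is on a run queue; spc_mutex is held ...
 *	}
 *	lwp_unlock(l);
 */
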
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.149 2010/06/13 04:13:31 yamt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_sa.h"
#include "opt_dtrace.h"

#define _LWP_API_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/pset.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/dtrace_bsd.h>
#include <sys/sdt.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct lwplist	alllwp = LIST_HEAD_INITIALIZER(alllwp);
static pool_cache_t lwp_cache;

/* DTrace proc provider probes */
SDT_PROBE_DEFINE(proc,,,lwp_create,
    "struct lwp *", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,lwp_start,
    "struct lwp *", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,lwp_exit,
    "struct lwp *", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL);

struct turnstile turnstile0;
struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
#ifdef LWP0_CPU_INFO
	.l_cpu = LWP0_CPU_INFO,
#endif
	.l_proc = &proc0,
	.l_lid = 1,
	.l_flag = LW_SYSTEM,
	.l_stat = LSONPROC,
	.l_ts = &turnstile0,
	.l_syncobj = &sched_syncobj,
	.l_refcnt = 1,
	.l_priority = PRI_USER + NPRI_USER - 1,
	.l_inheritedprio = -1,
	.l_class = SCHED_OTHER,
	.l_psid = PS_NONE,
	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
	.l_name = __UNCONST("swapper"),
	.l_fd = &filedesc0,
};

void
lwpinit(void)
{

	lwpinit_specificdata();
	lwp_sys_init();
	lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
	    "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

void
lwp0_init(void)
{
	struct lwp *l = &lwp0;

	KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
	KASSERT(l->l_lid == proc0.p_nlwpid);

	LIST_INSERT_HEAD(&alllwp, l, l_list);

	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
	cv_init(&l->l_sigcv, "sigwait");

	kauth_cred_hold(proc0.p_cred);
	l->l_cred = proc0.p_cred;

	lwp_initspecific(l);

	SYSCALL_TIME_LWP_INIT(l);
}

/*
 * Set an LWP suspended.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
int
lwp_suspend(struct lwp *curl, struct lwp *t)
{
	int error;

	KASSERT(mutex_owned(t->l_proc->p_lock));
	KASSERT(lwp_locked(t, NULL));

	KASSERT(curl != t || curl->l_stat == LSONPROC);

	/*
	 * If the current LWP has been told to exit, we must not suspend
	 * anyone else or deadlock could occur.  We won't return to
	 * userspace.
	 */
	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
		lwp_unlock(t);
		return (EDEADLK);
	}

	error = 0;

	switch (t->l_stat) {
	case LSRUN:
	case LSONPROC:
		t->l_flag |= LW_WSUSPEND;
		lwp_need_userret(t);
		lwp_unlock(t);
		break;

	case LSSLEEP:
		t->l_flag |= LW_WSUSPEND;

		/*
		 * Kick the LWP and try to get it to the kernel boundary
		 * so that it will release any locks that it holds.
		 * setrunnable() will release the lock.
		 */
		if ((t->l_flag & LW_SINTR) != 0)
			setrunnable(t);
		else
			lwp_unlock(t);
		break;

	case LSSUSPENDED:
		lwp_unlock(t);
		break;

	case LSSTOP:
		t->l_flag |= LW_WSUSPEND;
		setrunnable(t);
		break;

	case LSIDL:
	case LSZOMB:
		error = EINTR; /* It's what Solaris does..... */
		lwp_unlock(t);
		break;
	}

	return (error);
}

/*
 * Restart a suspended LWP.
 *
 * Must be called with p_lock held, and the LWP locked.  Will unlock the
 * LWP before return.
 */
void
lwp_continue(struct lwp *l)
{

	KASSERT(mutex_owned(l->l_proc->p_lock));
	KASSERT(lwp_locked(l, NULL));

	/* If rebooting or not suspended, then just bail out. */
	if ((l->l_flag & LW_WREBOOT) != 0) {
		lwp_unlock(l);
		return;
	}

	l->l_flag &= ~LW_WSUSPEND;

	if (l->l_stat != LSSUSPENDED) {
		lwp_unlock(l);
		return;
	}

	/* setrunnable() will release the lock. */
	setrunnable(l);
}

/*
 * Restart a stopped LWP.
 *
 * Must be called with p_lock held, and the LWP NOT locked.  Will unlock
 * the LWP before return.
 */
void
lwp_unstop(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(proc_lock));
	KASSERT(mutex_owned(p->p_lock));

	lwp_lock(l);

	/* If not stopped, then just bail out. */
	if (l->l_stat != LSSTOP) {
		lwp_unlock(l);
		return;
	}

	p->p_stat = SACTIVE;
	p->p_sflag &= ~PS_STOPPING;

	if (!p->p_waited)
		p->p_pptr->p_nstopchild--;

	if (l->l_wchan == NULL) {
		/* setrunnable() will release the lock. */
		setrunnable(l);
	} else {
		l->l_stat = LSSLEEP;
		p->p_nrlwps++;
		lwp_unlock(l);
	}
}
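
/*
 * Illustrative sketch: how a caller might drive lwp_suspend() while
 * honouring the locking protocol above, loosely modelled on the
 * _lwp_suspend() system call path.  "target_lid" is an assumed input:
 *
 *	struct proc *p = curproc;
 *	struct lwp *t;
 *	int error;
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, target_lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);	// unlocks t before returning
 *	mutex_exit(p->p_lock);
 */
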
/*
 * Wait for an LWP within the current process to exit.  If 'lid' is
 * non-zero, we are waiting for a specific LWP.
 *
 * Must be called with p->p_lock held.
 */
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	int nfound, error;
	lwpid_t curlid;
	bool exiting;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nlwpwait++;
	l->l_waitingfor = lid;
	curlid = l->l_lid;
	exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);

	for (;;) {
		/*
		 * Avoid a race between exit1() and sigexit(): if the
		 * process is dumping core, then we need to bail out: call
		 * into lwp_userret() where we will be suspended until the
		 * deed is done.
		 */
		if ((p->p_sflag & PS_WCORE) != 0) {
			mutex_exit(p->p_lock);
			lwp_userret(l);
#ifdef DIAGNOSTIC
			panic("lwp_wait1");
#endif
			/* NOTREACHED */
		}

		/*
		 * First off, drain any detached LWP that is waiting to be
		 * reaped.
		 */
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);/* releases proc mutex */
			mutex_enter(p->p_lock);
		}

		/*
		 * Now look for an LWP to collect.  If the whole process is
		 * exiting, count detached LWPs as eligible to be collected,
		 * but don't drain them here.
		 */
		nfound = 0;
		error = 0;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			/*
			 * If a specific wait and the target is waiting on
			 * us, then avoid deadlock.  This also traps LWPs
			 * that try to wait on themselves.
			 *
			 * Note that this does not handle more complicated
			 * cycles, like: t1 -> t2 -> t3 -> t1.  The process
			 * can still be killed so it is not a major problem.
			 */
			if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
				error = EDEADLK;
				break;
			}
			if (l2 == l)
				continue;
			if ((l2->l_prflag & LPR_DETACHED) != 0) {
				nfound += exiting;
				continue;
			}
			if (lid != 0) {
				if (l2->l_lid != lid)
					continue;
				/*
				 * Mark this LWP as the first waiter, if there
				 * is no other.
				 */
				if (l2->l_waiter == 0)
					l2->l_waiter = curlid;
			} else if (l2->l_waiter != 0) {
				/*
				 * It already has a waiter - so don't
				 * collect it.  If the waiter doesn't
				 * grab it we'll get another chance
				 * later.
				 */
				nfound++;
				continue;
			}
			nfound++;

			/* No need to lock the LWP in order to see LSZOMB. */
			if (l2->l_stat != LSZOMB)
				continue;

			/*
			 * We're no longer waiting.  Reset the "first waiter"
			 * pointer on the target, in case it was us.
			 */
			l->l_waitingfor = 0;
			l2->l_waiter = 0;
			p->p_nlwpwait--;
			if (departed)
				*departed = l2->l_lid;
			sched_lwp_collect(l2);

			/* lwp_free() releases the proc lock. */
			lwp_free(l2, false, false);
			mutex_enter(p->p_lock);
			return 0;
		}

		if (error != 0)
			break;
		if (nfound == 0) {
			error = ESRCH;
			break;
		}

		/*
		 * The kernel is careful to ensure that it can not deadlock
		 * when exiting - just keep waiting.
		 */
		if (exiting) {
			KASSERT(p->p_nlwps > 1);
			cv_wait(&p->p_lwpcv, p->p_lock);
			continue;
		}

		/*
		 * If all other LWPs are waiting for exits or suspends
		 * and the supply of zombies and potential zombies is
		 * exhausted, then we are about to deadlock.
		 *
		 * If the process is exiting (and this LWP is not the one
		 * that is coordinating the exit) then bail out now.
		 */
		if ((p->p_sflag & PS_WEXIT) != 0 ||
		    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
			error = EDEADLK;
			break;
		}

		/*
		 * Sit around and wait for something to happen.  We'll be
		 * awoken if any of the conditions examined change: if an
		 * LWP exits, is collected, or is detached.
		 */
		if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
			break;
	}

	/*
	 * We didn't find any LWPs to collect, we may have received a
	 * signal, or some other condition has caused us to bail out.
	 *
	 * If waiting on a specific LWP, clear the waiters marker: some
	 * other LWP may want it.  Then, kick all the remaining waiters
	 * so that they can re-check for zombies and for deadlock.
	 */
	if (lid != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2->l_lid == lid) {
				if (l2->l_waiter == curlid)
					l2->l_waiter = 0;
				break;
			}
		}
	}
	p->p_nlwpwait--;
	l->l_waitingfor = 0;
	cv_broadcast(&p->p_lwpcv);

	return error;
}
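
/*
 * Illustrative sketch: the _lwp_wait() system call path enters here
 * with p_lock held and copies the departed LID back to userland on
 * success (a minimal sketch; "lid" and "departed" are caller-supplied):
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait1(curlwp, lid, &departed, 0);
 *	mutex_exit(p->p_lock);
 */
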
/*
 * Create a new LWP within process 'p2', using LWP 'l1' as a template.
 * The new LWP is created in state LSIDL and must be set running,
 * suspended, or stopped by the caller.
 */
int
lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    lwp_t **rnewlwpp, int sclass)
{
	struct lwp *l2, *isfree;
	turnstile_t *ts;

	KASSERT(l1 == curlwp || l1->l_proc == &proc0);

	/*
	 * First off, reap any detached LWP waiting to be collected.
	 * We can re-use its LWP structure and turnstile.
	 */
	isfree = NULL;
	if (p2->p_zomblwp != NULL) {
		mutex_enter(p2->p_lock);
		if ((isfree = p2->p_zomblwp) != NULL) {
			p2->p_zomblwp = NULL;
			lwp_free(isfree, true, false);/* releases proc mutex */
		} else
			mutex_exit(p2->p_lock);
	}
	if (isfree == NULL) {
		l2 = pool_cache_get(lwp_cache, PR_WAITOK);
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
		SLIST_INIT(&l2->l_pi_lenders);
	} else {
		l2 = isfree;
		ts = l2->l_ts;
		KASSERT(l2->l_inheritedprio == -1);
		KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
		memset(l2, 0, sizeof(*l2));
		l2->l_ts = ts;
	}

	l2->l_stat = LSIDL;
	l2->l_proc = p2;
	l2->l_refcnt = 1;
	l2->l_class = sclass;

	/*
	 * If vfork(), we want the LWP to run fast and on the same CPU
	 * as its parent, so that it can reuse the VM context and cache
	 * footprint on the local CPU.
	 */
	l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
	l2->l_kpribase = PRI_KERNEL;
	l2->l_priority = l1->l_priority;
	l2->l_inheritedprio = -1;
	l2->l_flag = 0;
	l2->l_pflag = LP_MPSAFE;
	TAILQ_INIT(&l2->l_ld_locks);

	/*
	 * If not the first LWP in the process, grab a reference to the
	 * descriptor table.
	 */
	l2->l_fd = p2->p_fd;
	if (p2->p_nlwps != 0) {
		KASSERT(l1->l_proc == p2);
		fd_hold(l2);
	} else {
		KASSERT(l1->l_proc != p2);
	}

	if (p2->p_flag & PK_SYSTEM) {
		/* Mark it as a system LWP. */
		l2->l_flag |= LW_SYSTEM;
	}
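
	/*
	 * Seed the new LWP's lock and CPU from the creator's CPU with
	 * preemption disabled, so that l1->l_cpu cannot change underneath
	 * us; sched_takecpu() may choose a different CPU further below,
	 * once the LWP becomes visible in the process.
	 */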
	kpreempt_disable();
	l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
	l2->l_cpu = l1->l_cpu;
	kpreempt_enable();

	kdtrace_thread_ctor(NULL, l2);
	lwp_initspecific(l2);
	sched_lwp_fork(l1, l2);
	lwp_update_creds(l2);
	callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
	cv_init(&l2->l_sigcv, "sigwait");
	l2->l_syncobj = &sched_syncobj;

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	uvm_lwp_setuarea(l2, uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	mutex_enter(p2->p_lock);

	if ((flags & LWP_DETACHED) != 0) {
		l2->l_prflag = LPR_DETACHED;
		p2->p_ndlwps++;
	} else
		l2->l_prflag = 0;

	l2->l_sigmask = l1->l_sigmask;
	CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
	sigemptyset(&l2->l_sigpend.sp_set);

	p2->p_nlwpid++;
	if (p2->p_nlwpid == 0)
		p2->p_nlwpid++;
	l2->l_lid = p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	p2->p_nrlwps++;

	if ((p2->p_flag & PK_SYSTEM) == 0) {
		/* Inherit an affinity */
		if (l1->l_flag & LW_AFFINITY) {
			/*
			 * Note that we hold the state lock while inheriting
			 * the affinity to avoid race with sched_setaffinity().
			 */
			lwp_lock(l1);
			if (l1->l_flag & LW_AFFINITY) {
				kcpuset_use(l1->l_affinity);
				l2->l_affinity = l1->l_affinity;
				l2->l_flag |= LW_AFFINITY;
			}
			lwp_unlock(l1);
		}
		lwp_lock(l2);
		/* Inherit a processor-set */
		l2->l_psid = l1->l_psid;
		/* Look for a CPU to start */
		l2->l_cpu = sched_takecpu(l2);
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
	}
	mutex_exit(p2->p_lock);

	SDT_PROBE(proc,,,lwp_create, l2, 0,0,0,0);

	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	mutex_exit(proc_lock);

	SYSCALL_TIME_LWP_INIT(l2);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}

/*
 * Called by MD code when a new LWP begins execution.  Must be called
 * with the previous LWP locked (so at splsched), or if there is no
 * previous LWP, at splsched.
 */
void
lwp_startup(struct lwp *prev, struct lwp *new)
{

	SDT_PROBE(proc,,,lwp_start, new, 0,0,0,0);

	KASSERT(kpreempt_disabled());
	if (prev != NULL) {
		/*
		 * Normalize the count of the spin-mutexes, it was
		 * increased in mi_switch().  Unmark the state of
		 * context switch - it is finished for previous LWP.
		 */
		curcpu()->ci_mtx_count++;
		membar_exit();
		prev->l_ctxswtch = 0;
	}
	KPREEMPT_DISABLE(new);
	spl0();
	pmap_activate(new);
	LOCKDEBUG_BARRIER(NULL, 0);
	KPREEMPT_ENABLE(new);
	if ((new->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_LOCK(1, new);
	}
}
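
/*
 * Illustrative sketch: making a freshly created LWP runnable by hand,
 * loosely modelled on kthread_create().  Error handling and priority
 * setup are omitted; "func" and "arg" are assumed inputs:
 *
 *	lwp_t *l;
 *	vaddr_t uaddr = uvm_uarea_alloc();
 *
 *	error = lwp_create(curlwp, &proc0, uaddr, LWP_DETACHED, NULL, 0,
 *	    func, arg, &l, SCHED_OTHER);
 *	if (error == 0) {
 *		mutex_enter(proc0.p_lock);
 *		lwp_lock(l);
 *		l->l_stat = LSRUN;		// LSIDL -> LSRUN
 *		sched_enqueue(l, false);	// hand to the scheduler
 *		lwp_unlock(l);
 *		mutex_exit(proc0.p_lock);
 *	}
 */
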
/*
 * Exit an LWP.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	bool current;

	current = (l == curlwp);

	KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
	KASSERT(p == curproc);

	SDT_PROBE(proc,,,lwp_exit, l, 0,0,0,0);

	/*
	 * Verify that we hold no locks other than the kernel lock.
	 */
	LOCKDEBUG_BARRIER(&kernel_lock, 0);

	/*
	 * If we are the last live LWP in a process, we need to exit the
	 * entire process.  We do so with an exit status of zero, because
	 * it's a "controlled" exit, and because that's what Solaris does.
	 *
	 * We are not quite a zombie yet, but for accounting purposes we
	 * must increment the count of zombies here.
	 *
	 * Note: the last LWP's specificdata will be deleted here.
	 */
	mutex_enter(p->p_lock);
	if (p->p_nlwps - p->p_nzlwps == 1) {
		KASSERT(current == true);
		/* XXXSMP kernel_lock not held */
		exit1(l, 0);
		/* NOTREACHED */
	}
	p->p_nzlwps++;
	mutex_exit(p->p_lock);

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/* Drop filedesc reference. */
	fd_free();

	/* Delete the specificdata while it's still safe to sleep. */
	lwp_finispecific(l);

	/*
	 * Release our cached credentials.
	 */
	kauth_cred_free(l->l_cred);
	callout_destroy(&l->l_timeout_ch);

	/*
	 * Remove the LWP from the global list.
	 */
	mutex_enter(proc_lock);
	LIST_REMOVE(l, l_list);
	mutex_exit(proc_lock);

	/*
	 * Get rid of all references to the LWP that others (e.g. procfs)
	 * may have, and mark the LWP as a zombie.  If the LWP is detached,
	 * mark it waiting for collection in the proc structure.  Note that
	 * before we can do that, we need to free any other dead, detached
	 * LWP waiting to meet its maker.
	 */
	mutex_enter(p->p_lock);
	lwp_drainrefs(l);

	if ((l->l_prflag & LPR_DETACHED) != 0) {
		while ((l2 = p->p_zomblwp) != NULL) {
			p->p_zomblwp = NULL;
			lwp_free(l2, false, false);/* releases proc mutex */
			mutex_enter(p->p_lock);
			l->l_refcnt++;
			lwp_drainrefs(l);
		}
		p->p_zomblwp = l;
	}

	/*
	 * If we find a pending signal for the process and we have been
	 * asked to check for signals, then we lose: arrange to have
	 * all other LWPs in the process check for signals.
	 */
	if ((l->l_flag & LW_PENDSIG) != 0 &&
	    firstsig(&p->p_sigpend.sp_set) != 0) {
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			lwp_lock(l2);
			l2->l_flag |= LW_PENDSIG;
			lwp_unlock(l2);
		}
	}

	lwp_lock(l);
	l->l_stat = LSZOMB;
	if (l->l_name != NULL)
		strcpy(l->l_name, "(zombie)");
	if (l->l_flag & LW_AFFINITY) {
		l->l_flag &= ~LW_AFFINITY;
	} else {
		KASSERT(l->l_affinity == NULL);
	}
	lwp_unlock(l);
	p->p_nrlwps--;
	cv_broadcast(&p->p_lwpcv);
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
	mutex_exit(p->p_lock);

	/* Safe without lock since LWP is in zombie state */
	if (l->l_affinity) {
		kcpuset_unuse(l->l_affinity, NULL);
		l->l_affinity = NULL;
	}

	/*
	 * We can no longer block.  At this point, lwp_free() may already
	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
	 *
	 * Free MD LWP resources.
	 */
	cpu_lwp_free(l, 0);

	if (current) {
		pmap_deactivate(l);

		/*
		 * Release the kernel lock, and switch away into
		 * oblivion.
		 */
#ifdef notyet
		/* XXXSMP hold in lwp_userret() */
		KERNEL_UNLOCK_LAST(l);
#else
		KERNEL_UNLOCK_ALL(l, NULL);
#endif
		lwp_exit_switchaway(l);
	}
}
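
/*
 * Illustrative sketch: every reaper of a detached zombie follows the
 * same pattern (compare the p_zomblwp loops in lwp_create() and
 * lwp_wait1() above); lwp_free() is entered with p_lock held and
 * returns with it released:
 *
 *	mutex_enter(p->p_lock);
 *	if ((l2 = p->p_zomblwp) != NULL) {
 *		p->p_zomblwp = NULL;
 *		lwp_free(l2, false, false);	// drops p->p_lock
 *	} else
 *		mutex_exit(p->p_lock);
 */
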
/*
 * Free a dead LWP's remaining resources.
 *
 * XXXLWP limits.
 */
void
lwp_free(struct lwp *l, bool recycle, bool last)
{
	struct proc *p = l->l_proc;
	struct rusage *ru;
	ksiginfoq_t kq;

	KASSERT(l != curlwp);

	/*
	 * If this was not the last LWP in the process, then adjust
	 * counters and unlock.
	 */
	if (!last) {
		/*
		 * Add the LWP's run time to the process' base value.
		 * This needs to coincide with coming off p_lwps.
		 */
		bintime_add(&p->p_rtime, &l->l_rtime);
		p->p_pctcpu += l->l_pctcpu;
		ru = &p->p_stats->p_ru;
		ruadd(ru, &l->l_ru);
		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
		ru->ru_nivcsw += l->l_nivcsw;
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		p->p_nzlwps--;
		if ((l->l_prflag & LPR_DETACHED) != 0)
			p->p_ndlwps--;

		/*
		 * Have any LWPs sleeping in lwp_wait() recheck for
		 * deadlock.
		 */
		cv_broadcast(&p->p_lwpcv);
		mutex_exit(p->p_lock);
	}

#ifdef MULTIPROCESSOR
	/*
	 * In the unlikely event that the LWP is still on the CPU,
	 * then spin until it has switched away.  We need to release
	 * all locks to avoid deadlock against interrupt handlers on
	 * the target CPU.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
		int count;
		(void)count; /* XXXgcc */
		KERNEL_UNLOCK_ALL(curlwp, &count);
		while ((l->l_pflag & LP_RUNNING) != 0 ||
		    l->l_cpu->ci_curlwp == l)
			SPINLOCK_BACKOFF_HOOK;
		KERNEL_LOCK(count, curlwp);
	}
#endif

	/*
	 * Destroy the LWP's remaining signal information.
	 */
	ksiginfo_queue_init(&kq);
	sigclear(&l->l_sigpend, NULL, &kq);
	ksiginfo_queue_drain(&kq);
	cv_destroy(&l->l_sigcv);

	/*
	 * Free the LWP's turnstile and the LWP structure itself unless the
	 * caller wants to recycle them.  Also, free the scheduler specific
	 * data.
	 *
	 * We can't return turnstile0 to the pool (it didn't come from it),
	 * so if it comes up just drop it quietly and move on.
	 *
	 * We don't recycle the VM resources at this time.
	 */
	if (l->l_lwpctl != NULL)
		lwp_ctl_free(l);

	if (!recycle && l->l_ts != &turnstile0)
		pool_cache_put(turnstile_cache, l->l_ts);
	if (l->l_name != NULL)
		kmem_free(l->l_name, MAXCOMLEN);

	cpu_lwp_free2(l);
	uvm_lwp_exit(l);

	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
	KASSERT(l->l_inheritedprio == -1);
	kdtrace_thread_dtor(NULL, l);
	if (!recycle)
		pool_cache_put(lwp_cache, l);
}
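
/*
 * Illustrative sketch: an external caller (for example, the processor
 * set code) migrates an LWP as follows; lwp_migrate() consumes the
 * lock taken by lwp_lock().  "tci" is an assumed struct cpu_info *:
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, tci);	// unlocks l
 */
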
/*
 * Migrate the LWP to another CPU.  Unlocks the LWP.
 */
void
lwp_migrate(lwp_t *l, struct cpu_info *tci)
{
	struct schedstate_percpu *tspc;
	int lstat = l->l_stat;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(tci != NULL);

	/* If LWP is still on the CPU, it must be handled like LSONPROC */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		lstat = LSONPROC;
	}

	/*
	 * The destination CPU could be changed while previous migration
	 * was not finished.
	 */
	if (l->l_target_cpu != NULL) {
		l->l_target_cpu = tci;
		lwp_unlock(l);
		return;
	}

	/* Nothing to do if trying to migrate to the same CPU */
	if (l->l_cpu == tci) {
		lwp_unlock(l);
		return;
	}

	KASSERT(l->l_target_cpu == NULL);
	tspc = &tci->ci_schedstate;
	switch (lstat) {
	case LSRUN:
		l->l_target_cpu = tci;
		break;
	case LSIDL:
		l->l_cpu = tci;
		lwp_unlock_to(l, tspc->spc_mutex);
		return;
	case LSSLEEP:
		l->l_cpu = tci;
		break;
	case LSSTOP:
	case LSSUSPENDED:
		l->l_cpu = tci;
		if (l->l_wchan == NULL) {
			lwp_unlock_to(l, tspc->spc_lwplock);
			return;
		}
		break;
	case LSONPROC:
		l->l_target_cpu = tci;
		spc_lock(l->l_cpu);
		cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
		spc_unlock(l->l_cpu);
		break;
	}
	lwp_unlock(l);
}

/*
 * Find the LWP in the process.  Either of the arguments may be zero, in
 * which case the calling process and the first LWP in the list are used
 * respectively.  On success, returns with the proc locked.
 */
struct lwp *
lwp_find2(pid_t pid, lwpid_t lid)
{
	proc_t *p;
	lwp_t *l;

	/* Find the process */
	p = (pid == 0) ? curlwp->l_proc : p_find(pid, PFIND_UNLOCK_FAIL);
	if (p == NULL)
		return NULL;
	mutex_enter(p->p_lock);
	if (pid != 0) {
		/* Case of p_find */
		mutex_exit(proc_lock);
	}

	/* Find the thread */
	l = (lid == 0) ? LIST_FIRST(&p->p_lwps) : lwp_find(p, lid);
	if (l == NULL) {
		mutex_exit(p->p_lock);
	}

	return l;
}

/*
 * Look up a live LWP within the specified process.
 *
 * Must be called with p->p_lock held.
 */
struct lwp *
lwp_find(struct proc *p, int id)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	mutex_enter(p->p_lock);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	l->l_prflag &= ~LPR_CRMOD;
	mutex_exit(p->p_lock);
	if (oc != NULL)
		kauth_cred_free(oc);
}

/*
 * Verify that an LWP is locked, and optionally verify that the lock
 * matches one we specify.
 */
int
lwp_locked(struct lwp *l, kmutex_t *mtx)
{
	kmutex_t *cur = l->l_mutex;

	return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}
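
/*
 * Illustrative sketch: the lwp_lock() inline (defined in sys/lwp.h)
 * pairs with lwp_lock_retry() below; it takes the current l_mutex and
 * falls back to the retry loop whenever the pointer changed while it
 * was spinning.  Roughly:
 *
 *	kmutex_t *old = l->l_mutex;
 *
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))
 *		lwp_lock_retry(l, old);
 */
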
/*
 * Lock an LWP.
 */
kmutex_t *
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/*
	 * XXXgcc ignoring kmutex_t * volatile on i386
	 *
	 * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
	 */
#if 1
	while (l->l_mutex != old) {
#else
	for (;;) {
#endif
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);

		/*
		 * mutex_enter() will have posted a read barrier.  Re-test
		 * l->l_mutex.  If it has changed, we need to try again.
		 */
#if 1
	}
#else
	} while (__predict_false(l->l_mutex != old));
#endif

	return old;
}

/*
 * Lend a new mutex to an LWP.  The old mutex must be held.
 */
void
lwp_setlock(struct lwp *l, kmutex_t *new)
{

	KASSERT(mutex_owned(l->l_mutex));

	membar_exit();
	l->l_mutex = new;
}

/*
 * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
 * must be held.
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	membar_exit();
	l->l_mutex = new;
	mutex_spin_exit(old);
}

/*
 * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
 * locked.
 */
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	if (old != new) {
		mutex_spin_enter(new);
		l->l_mutex = new;
		mutex_spin_exit(old);
	}
}

int
lwp_trylock(struct lwp *l)
{
	kmutex_t *old;

	for (;;) {
		if (!mutex_tryenter(old = l->l_mutex))
			return 0;
		if (__predict_true(l->l_mutex == old))
			return 1;
		mutex_spin_exit(old);
	}
}

void
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));
	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
}
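
/*
 * Illustrative sketch: the sleep queue code lends its lock to an LWP
 * when putting it to sleep (compare sleepq_enter() in kern_sleepq.c);
 * afterwards, locking the LWP means locking the sleep queue:
 *
 *	lwp_lock(l);			// takes the current l_mutex
 *	lwp_unlock_to(l, mp);		// l_mutex is now the sleepq lock
 */
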
/*
 * Handle exceptions for mi_userret().  Called if a member of LW_USERRET is
 * set.
 */
void
lwp_userret(struct lwp *l)
{
	struct proc *p;
	void (*hook)(void);
	int sig;

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	p = l->l_proc;

#ifndef __HAVE_FAST_SOFTINTS
	/* Run pending soft interrupts. */
	if (l->l_cpu->ci_data.cpu_softints != 0)
		softint_overlay();
#endif

#ifdef KERN_SA
	/* Generate UNBLOCKED upcall if needed */
	if (l->l_flag & LW_SA_BLOCKING) {
		sa_unblock_userret(l);
		/* NOTREACHED */
	}
#endif

	/*
	 * It should be safe to do this read unlocked on a multiprocessor
	 * system.
	 *
	 * LW_SA_UPCALL will be handled after the while() loop, so don't
	 * consider it now.
	 */
	while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
		/*
		 * Process pending signals first, unless the process
		 * is dumping core or exiting, where we will instead
		 * enter the LW_WSUSPEND case below.
		 */
		if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
		    LW_PENDSIG) {
			mutex_enter(p->p_lock);
			while ((sig = issignal(l)) != 0)
				postsig(sig);
			mutex_exit(p->p_lock);
		}

		/*
		 * Core-dump or suspend pending.
		 *
		 * In case of core dump, suspend ourselves, so that the
		 * kernel stack and therefore the userland registers saved
		 * in the trapframe are around for coredump() to write them
		 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
		 * will write the core file out once all other LWPs are
		 * suspended.
		 */
		if ((l->l_flag & LW_WSUSPEND) != 0) {
			mutex_enter(p->p_lock);
			p->p_nrlwps--;
			cv_broadcast(&p->p_lwpcv);
			lwp_lock(l);
			l->l_stat = LSSUSPENDED;
			lwp_unlock(l);
			mutex_exit(p->p_lock);
			lwp_lock(l);
			mi_switch(l);
		}

		/* Process is exiting. */
		if ((l->l_flag & LW_WEXIT) != 0) {
			lwp_exit(l);
			KASSERT(0);
			/* NOTREACHED */
		}

		/* Call userret hook; used by Linux emulation. */
		if ((l->l_flag & LW_WUSERRET) != 0) {
			lwp_lock(l);
			l->l_flag &= ~LW_WUSERRET;
			lwp_unlock(l);
			hook = p->p_userret;
			p->p_userret = NULL;
			(*hook)();
		}
	}

#ifdef KERN_SA
	/*
	 * Timer events are handled specially.  We only try once to deliver
	 * pending timer upcalls; if it fails, we can try again on the next
	 * loop around.  If we need to re-enter lwp_userret(), MD code will
	 * bounce us back here through the trap path after we return.
	 */
	if (p->p_timerpend)
		timerupcall(l);
	if (l->l_flag & LW_SA_UPCALL)
		sa_upcall_userret(l);
#endif /* KERN_SA */
}

/*
 * Force an LWP to enter the kernel, to take a trip through lwp_userret().
 */
void
lwp_need_userret(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	/*
	 * Since the tests in lwp_userret() are done unlocked, make sure
	 * that the condition will be seen before forcing the LWP to enter
	 * kernel mode.
	 */
	membar_producer();
	cpu_signotify(l);
}

/*
 * Add one reference to an LWP.  This will prevent the LWP from
 * exiting, thus keeping the lwp structure and PCB around to inspect.
 */
void
lwp_addref(struct lwp *l)
{

	KASSERT(mutex_owned(l->l_proc->p_lock));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt++;
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.
 */
void
lwp_delref(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(p->p_lock);
	lwp_delref2(l);
	mutex_exit(p->p_lock);
}

/*
 * Remove one reference to an LWP.  If this is the last reference,
 * then we must finalize the LWP's death.  The proc mutex is held
 * on entry.
 */
void
lwp_delref2(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(l->l_stat != LSZOMB);
	KASSERT(l->l_refcnt > 0);
	if (--l->l_refcnt == 0)
		cv_broadcast(&p->p_lwpcv);
}

/*
 * Drain all references to the current LWP.
 */
void
lwp_drainrefs(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(l->l_refcnt != 0);

	l->l_refcnt--;
	while (l->l_refcnt != 0)
		cv_wait(&p->p_lwpcv, p->p_lock);
}
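
/*
 * Illustrative sketch: how a subsystem such as procfs pins an LWP
 * while inspecting it, so the LWP cannot finish exiting underneath
 * the caller ("lid" is an assumed input):
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	if (l != NULL) {
 *		// ... inspect the LWP ...
 *		lwp_delref(l);
 *	}
 */
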
/*
 * Return true if the specified LWP is 'alive'.  Only p->p_lock need
 * be held.
 */
bool
lwp_alive(lwp_t *l)
{

	KASSERT(mutex_owned(l->l_proc->p_lock));

	switch (l->l_stat) {
	case LSSLEEP:
	case LSRUN:
	case LSONPROC:
	case LSSTOP:
	case LSSUSPENDED:
		return true;
	default:
		return false;
	}
}

/*
 * Return first live LWP in the process.
 */
lwp_t *
lwp_find_first(proc_t *p)
{
	lwp_t *l;

	KASSERT(mutex_owned(p->p_lock));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (lwp_alive(l)) {
			return l;
		}
	}

	return NULL;
}
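
/*
 * Illustrative sketch: the lwpctl machinery below gives each user LWP
 * a shared lwpctl_t that the kernel updates on every context switch,
 * so userland can poll scheduling state without a system call, e.g.
 * via _lwp_ctl(2):
 *
 *	static volatile struct lwpctl *lc;
 *	int cpu;
 *
 *	_lwp_ctl(LWPCTL_FEATURE_CURCPU, (struct lwpctl **)&lc);
 *	cpu = lc->lc_curcpu;	// no syscall needed on this read
 */
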
/*
 * Allocate a new lwpctl structure for a user LWP.
 */
int
lwp_ctl_alloc(vaddr_t *uaddr)
{
	lcproc_t *lp;
	u_int bit, i, offset;
	struct uvm_object *uao;
	int error;
	lcpage_t *lcp;
	proc_t *p;
	lwp_t *l;

	l = curlwp;
	p = l->l_proc;

	if (l->l_lcpage != NULL) {
		lcp = l->l_lcpage;
		*uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
		return 0;
	}

	/* First time around, allocate header structure for the process. */
	if ((lp = p->p_lwpctl) == NULL) {
		lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
		mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
		lp->lp_uao = NULL;
		TAILQ_INIT(&lp->lp_pages);
		mutex_enter(p->p_lock);
		if (p->p_lwpctl == NULL) {
			p->p_lwpctl = lp;
			mutex_exit(p->p_lock);
		} else {
			mutex_exit(p->p_lock);
			mutex_destroy(&lp->lp_lock);
			kmem_free(lp, sizeof(*lp));
			lp = p->p_lwpctl;
		}
	}

	/*
	 * Set up an anonymous memory region to hold the shared pages.
	 * Map them into the process' address space.  The user vmspace
	 * gets the first reference on the UAO.
	 */
	mutex_enter(&lp->lp_lock);
	if (lp->lp_uao == NULL) {
		lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
		lp->lp_cur = 0;
		lp->lp_max = LWPCTL_UAREA_SZ;
		lp->lp_uva = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
		error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
		    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
		    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
		if (error != 0) {
			uao_detach(lp->lp_uao);
			lp->lp_uao = NULL;
			mutex_exit(&lp->lp_lock);
			return error;
		}
	}

	/* Get a free block and allocate for this LWP. */
	TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
		if (lcp->lcp_nfree != 0)
			break;
	}
	if (lcp == NULL) {
		/* Nothing available - try to set up a free page. */
		if (lp->lp_cur == lp->lp_max) {
			mutex_exit(&lp->lp_lock);
			return ENOMEM;
		}
		lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
		if (lcp == NULL) {
			mutex_exit(&lp->lp_lock);
			return ENOMEM;
		}
		/*
		 * Wire the next page down in kernel space.  Since this
		 * is a new mapping, we must add a reference.
		 */
		uao = lp->lp_uao;
		(*uao->pgops->pgo_reference)(uao);
		lcp->lcp_kaddr = vm_map_min(kernel_map);
		error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
		    uao, lp->lp_cur, PAGE_SIZE,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
		    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
		if (error != 0) {
			mutex_exit(&lp->lp_lock);
			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
			(*uao->pgops->pgo_detach)(uao);
			return error;
		}
		error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
		    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
		if (error != 0) {
			mutex_exit(&lp->lp_lock);
			uvm_unmap(kernel_map, lcp->lcp_kaddr,
			    lcp->lcp_kaddr + PAGE_SIZE);
			kmem_free(lcp, LWPCTL_LCPAGE_SZ);
			return error;
		}
		/* Prepare the page descriptor and link into the list. */
		lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
		lp->lp_cur += PAGE_SIZE;
		lcp->lcp_nfree = LWPCTL_PER_PAGE;
		lcp->lcp_rotor = 0;
		memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
	}
	for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
		if (++i >= LWPCTL_BITMAP_ENTRIES)
			i = 0;
	}
	bit = ffs(lcp->lcp_bitmap[i]) - 1;
	lcp->lcp_bitmap[i] ^= (1 << bit);
	lcp->lcp_rotor = i;
	lcp->lcp_nfree--;
	l->l_lcpage = lcp;
	offset = (i << 5) + bit;
	l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
	*uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
	mutex_exit(&lp->lp_lock);

	KPREEMPT_DISABLE(l);
	l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
	KPREEMPT_ENABLE(l);

	return 0;
}

/*
 * Free an lwpctl structure back to the per-process list.
 */
void
lwp_ctl_free(lwp_t *l)
{
	lcproc_t *lp;
	lcpage_t *lcp;
	u_int map, offset;

	lp = l->l_proc->p_lwpctl;
	KASSERT(lp != NULL);

	lcp = l->l_lcpage;
	offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
	KASSERT(offset < LWPCTL_PER_PAGE);

	mutex_enter(&lp->lp_lock);
	lcp->lcp_nfree++;
	map = offset >> 5;
	lcp->lcp_bitmap[map] |= (1 << (offset & 31));
	if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
		lcp->lcp_rotor = map;
	if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
		TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
		TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
	}
	mutex_exit(&lp->lp_lock);
}

/*
 * Process is exiting; tear down lwpctl state.  This can only be safely
 * called by the last LWP in the process.
 */
void
lwp_ctl_exit(void)
{
	lcpage_t *lcp, *next;
	lcproc_t *lp;
	proc_t *p;
	lwp_t *l;

	l = curlwp;
	l->l_lwpctl = NULL;
	l->l_lcpage = NULL;
	p = l->l_proc;
	lp = p->p_lwpctl;

	KASSERT(lp != NULL);
	KASSERT(p->p_nlwps == 1);

	for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
		next = TAILQ_NEXT(lcp, lcp_chain);
		uvm_unmap(kernel_map, lcp->lcp_kaddr,
		    lcp->lcp_kaddr + PAGE_SIZE);
		kmem_free(lcp, LWPCTL_LCPAGE_SZ);
	}

	if (lp->lp_uao != NULL) {
		uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
		    lp->lp_uva + LWPCTL_UAREA_SZ);
	}

	mutex_destroy(&lp->lp_lock);
	kmem_free(lp, sizeof(*lp));
	p->p_lwpctl = NULL;
}

/*
 * Return the current LWP's "preemption counter".  Used to detect
 * preemption across operations that can tolerate preemption without
 * crashing, but which may generate incorrect results if preempted.
 */
uint64_t
lwp_pctr(void)
{

	return curlwp->l_ncsw;
}
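
/*
 * Illustrative sketch: detecting preemption around an operation that
 * tolerates it, but must redo its work if it happens:
 *
 *	uint64_t pctr;
 *
 *	do {
 *		pctr = lwp_pctr();
 *		// ... operation that must not be silently preempted ...
 *	} while (pctr != lwp_pctr());
 */
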
#if defined(DDB)
void
lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	lwp_t *l;

	LIST_FOREACH(l, &alllwp, l_list) {
		uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);

		if (addr < stack || stack + KSTACK_SIZE <= addr) {
			continue;
		}
		(*pr)("%p is %p+%zu, LWP %p's stack\n",
		    (void *)addr, (void *)stack,
		    (size_t)(addr - stack), l);
	}
}
#endif /* defined(DDB) */
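
/*
 * Illustrative sketch: lwp_whatis() backs ddb's "whatis" command.
 * Given an address that falls within some kernel stack, output of
 * roughly this shape is printed (the addresses are made up):
 *
 *	db{0}> whatis 0xffff800012345e40
 *	0xffff800012345e40 is 0xffff800012344000+7744, LWP 0xffff80000049a2c0's stack
 */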