/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <sys/exec.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <sys/kinfo.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>

/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)	(pid & ALLPROC_HMASK)
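/*
 * Example: pids, pgids, and session ids all hash with the same mask, so
 * pid 1000 selects bucket (1000 & 255) == 232, and any id differing by a
 * multiple of 256 shares that bucket's chain and its per-bucket token.
 */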
/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDDOM_SCALE	10	/* (10,000*SCALE)/sec performance guarantee */
#define PIDSEL_DOMAINS	rounddown(PID_MAX * PIDDOM_SCALE / PIDDOM_COUNT, \
				  ALLPROC_HSIZE)

/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
MALLOC_DEFINE(M_UPMAP, "upmap", "upmap/kpmap/lpmap structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
	   &ps_showallprocs, 0,
	   "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
	   &ps_showallthreads, 0,
	   "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
	    &pid_domain_skips, 0,
	    "Number of pid_doms[] domains skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
	    &pid_inner_skips, 0,
	    "Number of pids skipped in the inner collision loop");

static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Process related lists (for proc_token, allproc, allpgrp, and allsess)
 */
typedef struct procglob procglob_t;

static procglob_t procglob[ALLPROC_HSIZE];

/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * A ~100,000 entry array will support a 10-second reuse latency at
 * 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 *
 * Currently we allocate around a megabyte, making the worst-case fork
 * rate around 100,000/second.
 */
static uint8_t *pid_doms;
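/*
 * Worked example of the sizing above, assuming PID_MAX is 999999 (the
 * value this math appears tuned for): PIDSEL_DOMAINS rounds
 * 999999 * PIDDOM_SCALE / PIDDOM_COUNT == 999999 down to a multiple of
 * ALLPROC_HSIZE, giving 999936 one-byte entries -- the "around a
 * megabyte" noted above.
 */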
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * Sample and cache a process's ucred, replacing (cr) with a held
 * reference to p->p_ucred if it changed.  Used by the sysctl scan
 * code below to avoid re-holding the same ucred for every process.
 */
static __inline
struct ucred *
pcredcache(struct ucred *cr, struct proc *p)
{
	if (cr != p->p_ucred) {
		if (cr)
			crfree(cr);
		spin_lock(&p->p_spin);
		if ((cr = p->p_ucred) != NULL)
			crhold(cr);
		spin_unlock(&p->p_spin);
	}
	return cr;
}

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Allocate dynamically.  This array can be large (~1MB) so don't
	 * waste boot loader space.
	 */
	pid_doms = kmalloc(sizeof(pid_doms[0]) * PIDSEL_DOMAINS,
			   M_PROC, M_WAITOK | M_ZERO);

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	/*
	 * Other misc init.
	 */
	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];

		LIST_INIT(&prg->allproc);
		LIST_INIT(&prg->allsess);
		LIST_INIT(&prg->allpgrp);
		lwkt_token_init(&prg->proc_token, "allproc");
	}
	uihashinit();
}

void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
			 p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
			 pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
			 sess, s_list);
}
/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that (p) may
 * not be valid in this case if the caller does not have some other
 * reference on (p).
 *
 * This function does not block on other PHOLD()s, only on other
 * PHOLDZOMB()s.
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}
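/*
 * A minimal sketch (not compiled in) of the hold protocol described
 * above: a short-term PHOLD() pins the proc structure so it cannot be
 * ripped out while it is examined.  The helper name is hypothetical,
 * for illustration only.
 */
#if 0
static int
example_proc_is_stopped(struct proc *p)
{
	int stopped;

	PHOLD(p);			/* pin proc structure */
	stopped = (p->p_stat == SSTOP);	/* safe to inspect while held */
	PRELE(p);			/* (p) may be freed after this */
	return (stopped);
}
#endif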
/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);	/* drop our hold before returning */
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Add a reference to a process group.
 */
void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

/*
 * Release a reference to a process group.  The final release frees
 * the pgrp on the 1->0 transition while holding the per-bucket token.
 */
void
pgrel(struct pgrp *pgrp)
{
	procglob_t *prg;
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	prg = &procglob[n];

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, prg->proc_token is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	if (pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&prg->proc_token);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	procglob_t *prg;
	int n;

	n = PGRP_HASH(pgid);
	prg = &procglob[n];
	lwkt_gettoken_shared(&prg->proc_token);

	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&prg->proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&prg->proc_token);
	return (NULL);
}
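/*
 * A minimal sketch (not compiled in) of pgfind()'s reference discipline
 * as documented above: the returned pgrp is referenced and must be
 * dropped with pgrel(), which frees the structure on the final 1->0
 * transition.  The helper name is hypothetical.
 */
#if 0
static int
example_pgrp_exists(pid_t pgid)
{
	struct pgrp *pgrp;

	if ((pgrp = pgfind(pgid)) == NULL)
		return (0);
	/* ... inspect pgrp while the ref is held ... */
	pgrel(pgrp);
	return (1);
}
#endif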
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		procglob_t *prg;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		prg = &procglob[n];

		lwkt_gettoken(&prg->proc_token);
		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&prg->proc_token);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_prg = prg;
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);

		lwkt_reltoken(&prg->proc_token);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);	/* manual pgref */
		pgrel(opgrp);	/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}
/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	procglob_t *prg;
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	prg = &procglob[n];

	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and prg->proc_token is held.
	 */
	LIST_REMOVE(sess, s_list);
	if (pid_doms[sess->s_sid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&prg->proc_token);

	kfree(sess, M_SESSION);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned; if there are any stopped
 * processes in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}
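/*
 * Example of the randomization above: with kern.randompid set to 10000,
 * the read_random() output is reduced modulo 10000 and added to the
 * monotonic nextpid base, spreading successive pids over a ~10000-wide
 * window instead of allocating them strictly sequentially.
 */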
/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	procglob_t *prg;
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for wrap-around, do not allow a pid < 100.
	 */
	retries = 0;
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	if (base >= 100) {
		delta8 = (int8_t)time_second -
			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
			++pid_domain_skips;
			goto retry;
		}
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	prg = &procglob[n];
	lwkt_gettoken(&prg->proc_token);

restart1:
	LIST_FOREACH(ps, &prg->allproc, p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(sess, &prg->allsess, s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
	lwkt_reltoken(&prg->proc_token);
}

/*
 * Called from exit1 to place the process into a zombie state.
 * p_stat is set to SZOMB, which normal pfind[n]() calls skip,
 * effectively removing the process from pid hash lookups.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&prg->proc_token);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&prg->proc_token);
	dsched_exit_proc(p);
}
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes one before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 *
 * Assumes caller has one ref.
 */
void
proc_remove_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];

	PSTALL(p, "reap2", 1);
	lwkt_gettoken(&prg->proc_token);
	PSTALL(p, "reap2a", 1);
	LIST_REMOVE(p, p_list);		/* remove from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	p->p_pptr = NULL;
	p->p_ppid = 0;
	if (pid_doms[p->p_pid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&prg->proc_token);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}

/*
 * Called when the per-process user/kernel shared page (upmap) needs to
 * be allocated.  Fills in the self-describing header and leaves any
 * already-installed upmap intact.
 */
void
proc_usermap(struct proc *p, int invfork)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_UPMAP,
			M_WAITOK | M_ZERO);
	if (p->p_upmap == NULL) {
		upmap->header[0].type = UKPTYPE_VERSION;
		upmap->header[0].offset = offsetof(struct sys_upmap, version);
		upmap->header[1].type = UPTYPE_RUNTICKS;
		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
		upmap->header[2].type = UPTYPE_FORKID;
		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
		upmap->header[3].type = UPTYPE_PID;
		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
		upmap->header[4].type = UPTYPE_PROC_TITLE;
		upmap->header[4].offset = offsetof(struct sys_upmap, proc_title);
		upmap->header[5].type = UPTYPE_INVFORK;
		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);

		upmap->version = UPMAP_VERSION;
		upmap->pid = p->p_pid;
		upmap->forkid = p->p_forkid;
		upmap->invfork = invfork;
		p->p_upmap = upmap;
	} else {
		kfree(upmap, M_UPMAP);
	}
	lwkt_reltoken(&p->p_token);
}

/*
 * Release the per-process upmap, if allocated.
 */
void
proc_userunmap(struct proc *p)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	if ((upmap = p->p_upmap) != NULL) {
		p->p_upmap = NULL;
		kfree(upmap, M_UPMAP);
	}
	lwkt_reltoken(&p->p_token);
}
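/*
 * A minimal sketch (not compiled in) of how userland can consume the
 * self-describing header[] initialized in proc_usermap() above: scan
 * for a UPTYPE_* code and use the stored offset instead of hardcoding
 * the structure layout.  The helper name and fixed bound are
 * hypothetical, for illustration only.
 */
#if 0
static void *
example_upmap_field(struct sys_upmap *upmap, int type)
{
	int i;

	for (i = 0; i < 6; ++i) {	/* 6 headers set up above */
		if (upmap->header[i].type == type)
			return ((char *)upmap + upmap->header[i].offset);
	}
	return (NULL);
}
#endif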
/*
 * Called when the per-thread user/kernel shared page needs to be
 * allocated.  The function refuses to allocate the page if the
 * thread is exiting to avoid races against lwp_userunmap().
 */
void
lwp_usermap(struct lwp *lp, int invfork)
{
	struct sys_lpmap *lpmap;

	lwkt_gettoken(&lp->lwp_token);

	lpmap = kmalloc(roundup2(sizeof(*lpmap), PAGE_SIZE), M_UPMAP,
			M_WAITOK | M_ZERO);
	if (lp->lwp_lpmap == NULL && (lp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
		lpmap->header[0].type = UKPTYPE_VERSION;
		lpmap->header[0].offset = offsetof(struct sys_lpmap, version);
		lpmap->header[1].type = LPTYPE_BLOCKALLSIGS;
		lpmap->header[1].offset = offsetof(struct sys_lpmap,
						   blockallsigs);
		lpmap->header[2].type = LPTYPE_THREAD_TITLE;
		lpmap->header[2].offset = offsetof(struct sys_lpmap,
						   thread_title);

		lpmap->version = LPMAP_VERSION;
		lp->lwp_lpmap = lpmap;
	} else {
		kfree(lpmap, M_UPMAP);
	}
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Called when a LWP (but not necessarily the whole process) exits.
 * Called when a process execs (after all other threads have been killed).
 *
 * lwp-specific mappings must be removed.  If userland didn't do it, then
 * we have to.  Otherwise we could end up disclosing kernel memory due to
 * the ad-hoc pmap mapping.
 */
void
lwp_userunmap(struct lwp *lp)
{
	struct sys_lpmap *lpmap;
	struct vm_map *map;
	struct vm_map_backing *ba;
	struct vm_map_backing copy;

	lwkt_gettoken(&lp->lwp_token);
	map = &lp->lwp_proc->p_vmspace->vm_map;
	lpmap = lp->lwp_lpmap;
	lp->lwp_lpmap = NULL;

	spin_lock(&lp->lwp_spin);
	while ((ba = TAILQ_FIRST(&lp->lwp_lpmap_backing_list)) != NULL) {
		copy = *ba;
		spin_unlock(&lp->lwp_spin);

		lwkt_gettoken(&map->token);
		vm_map_remove(map, copy.start, copy.end);
		lwkt_reltoken(&map->token);

		spin_lock(&lp->lwp_spin);
	}
	spin_unlock(&lp->lwp_spin);

	if (lpmap)
		kfree(lpmap, M_UPMAP);
	lwkt_reltoken(&lp->lwp_token);
}
/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data, int segmented)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int ns;
	int ne;
	int r;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;

		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data, int segmented)
{
	struct proc *p;
	struct lwp *lp;
	int ns;
	int ne;
	int r = 0;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;

		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Asked to exit early
		 */
		if (p)
			break;
	}
}
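/*
 * A minimal sketch (not compiled in) of the scan API above: count all
 * non-zombie processes.  The callback runs with the process held;
 * returning 0 continues the scan and -1 would terminate it early.
 * Helper names are hypothetical.
 */
#if 0
static int
example_count_cb(struct proc *p, void *data)
{
	++*(int *)data;		/* (p) is held by allproc_scan() */
	return (0);		/* keep scanning */
}

static int
example_count_procs(void)
{
	int count = 0;

	allproc_scan(example_count_cb, &count, 0);
	return (count);
}
#endif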
/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	procglob_t *prg;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		prg = &procglob[i];

		if (LIST_EMPTY(&prg->allpgrp))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		if (skp == 0) {
			error = SYSCTL_OUT(req, &ki, sizeof(ki));
			bzero(&ki.kp_lwp, sizeof(ki.kp_lwp));
		}
		LWPRELE(lp);
		if (error)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * If aggregating threads, set the tid field to -1.
	 */
	if (skp)
		ki.kp_lwp.kl_tid = -1;

	/*
	 * We need to output at least the proc, even if there is no lwp.
	 * If skp is non-zero we aggregated the lwps and need to output
	 * the result.
	 */
	if (had_output == 0 || skp) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}
/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;
	struct ucred *crcache = NULL;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			crcache = pcredcache(crcache, p);
			if (PRISON_CHECK(cr1, crcache))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_EMPTY(&prg->allproc))
			continue;
		lwkt_gettoken_shared(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if (ps_showallprocs == 0) {
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    p_trespass(cr1, crcache)) {
					continue;
				}
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				     (udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_uid != (uid_t)name[0]) {
					continue;
				}
				break;

			case KERN_PROC_RUID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_ruid != (uid_t)name[0]) {
					continue;
				}
				break;
			}

			crcache = pcredcache(crcache, p);
			if (!PRISON_CHECK(cr1, crcache))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&prg->proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&prg->proc_token);
	}
	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	if (crcache)
		crfree(crcache);
	return (error);
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr) {
		if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
			/*
			 * Args set via writable user process mmap.
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			size_t n;
			char *base;

			base = p->p_upmap->proc_title;
			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if ((pa = p->p_args) != NULL) {
			/*
			 * Args set by setproctitle() sysctl.
			 */
			refcount_acquire(&pa->ar_ref);
			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
			if (refcount_release(&pa->ar_ref))
				kfree(pa, M_PARGS);
		}
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
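/*
 * A minimal userland sketch (not compiled in) of querying the args
 * sysctl above via { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid }.
 * The helper name and buffer handling are illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
example_get_args(pid_t pid, char *buf, size_t buflen)
{
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, (int)pid };

	return (sysctl(mib, 4, buf, &buflen, NULL, 0));
}
#endif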
/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	char *retbuf, *freebuf;
	int error = 0;
	struct nchandle nch;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	cache_copy(&p->p_textnch, &nch);
	error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
	cache_drop(&nch);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (*pidp != -1)
		PRELE(p);

	return (error);
}

static int
sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
{
	/*int *name = (int *)arg1;*/
	u_int namelen = arg2;
	struct kinfo_sigtramp kst;
	const struct sysentvec *sv;
	int error;

	if (namelen > 1)
		return (EINVAL);
	/* ignore pid if passed in (freebsd compatibility) */

	sv = curproc->p_sysent;
	bzero(&kst, sizeof(kst));
	if (sv->sv_szsigcode) {
		intptr_t sigbase;

		sigbase = trunc_page64((intptr_t)PS_STRINGS -
				       *sv->sv_szsigcode);
		sigbase -= SZSIGCODE_EXTRA_BYTES;

		kst.ksigtramp_start = (void *)sigbase;
		kst.ksigtramp_end = (void *)(sigbase + *sv->sv_szsigcode);
	}
	error = SYSCTL_OUT(req, &kst, sizeof(kst));

	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all,
	    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	    0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	    sysctl_kern_proc_args, "Process argument list");
list"); 2067 2068 SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, 2069 CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK, 2070 sysctl_kern_proc_cwd, "Process argument list"); 2071 2072 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, 2073 CTLFLAG_RD | CTLFLAG_NOLOCK, 2074 sysctl_kern_proc_pathname, "Process executable path"); 2075 2076 SYSCTL_PROC(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, 2077 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK, 2078 0, 0, sysctl_kern_proc_sigtramp, "S,sigtramp", 2079 "Return sigtramp address range"); 2080