/*
 * (MPSAFE)
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 * $FreeBSD: src/sys/kern/kern_proc.c,v 1.63.2.9 2003/05/08 07:47:16 kbyanc Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");

static void orphanpg(struct pgrp *pg);
static pid_t proc_getnewpid_locked(int random_offset);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct spinlock pghash_spin = SPINLOCK_INITIALIZER(&pghash_spin);
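
/*
 * Illustrative usage note (not part of this file): the security.ps_*
 * knobs defined above can be flipped at runtime with sysctl(8), e.g. to
 * hide other users' processes and kernel threads from unprivileged ps(1):
 *
 *	sysctl security.ps_showallprocs=0
 *	sysctl security.ps_showallthreads=0
 */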
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * Called from the low level boot code only.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	lwkt_init();
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	uihashinit();
}

/*
 * Process hold/release support functions.  These functions must be MPSAFE.
 * Called via the PHOLD(), PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}
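
/*
 * Illustrative sketch only (not part of this file's API surface): the
 * short-term hold pattern described in the comment block above.  The hold
 * keeps the proc structure from being ripped out while we look at it; it
 * does not freeze the process's state.
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(some_pid)) != NULL) {	// pfind() returns held
 *		... inspect p ...
 *		PRELE(p);			// p may be invalid after this
 *	}
 */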
/*
 * WARNING!  On last release (p) can become instantly invalid due to
 * MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 * MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 * The caller must hold proc_token if the caller wishes a stable result.
 */
int
inferior(struct proc *p)
{
	lwkt_gettoken_shared(&proc_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&proc_token);
			return (0);
		}
		p = p->p_pptr;
	}
	lwkt_reltoken(&proc_token);
	return (1);
}
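
/*
 * Illustrative sketch only: the retry idiom pholdzomb() above is meant
 * for.  The caller keeps (p) valid via some other interlock, typically
 * the parent's p_token, and re-resolves the child and retries whenever
 * pholdzomb() blocked (returned non-zero):
 *
 *	while (pholdzomb(p) != 0) {
 *		// we blocked; (p) may have been reaped, look it up again
 *		if ((p = relookup_child(q)) == NULL)	// hypothetical helper
 *			return;
 *	}
 *	... reap the zombie ...
 *	prelezomb(p);
 */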
/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	lwkt_gettoken_shared(&proc_token);
	LIST_FOREACH(p, PIDHASH(pid), p_hash) {
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&proc_token);

	return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The caller should hold proc_token if the caller wishes a stable result.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	lwkt_gettoken_shared(&proc_token);
	LIST_FOREACH(p, PIDHASH(pid), p_hash) {
		if (p->p_pid == pid) {
			lwkt_reltoken(&proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&proc_token);
	return (NULL);
}

void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
	int count;

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			spin_lock(&pghash_spin);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			spin_unlock(&pghash_spin);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, pghash_spin is held.
	 */
	LIST_REMOVE(pgrp, pg_hash);
	spin_unlock(&pghash_spin);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;

	spin_lock_shared(&pghash_spin);
	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			spin_unlock_shared(&pghash_spin);
			return (pgrp);
		}
	}
	spin_unlock_shared(&pghash_spin);
	return (NULL);
}
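
/*
 * Illustrative sketch only: pgfind() returns a referenced pgrp which
 * must be released with pgrel() (or the reference handed off somewhere):
 *
 *	struct pgrp *pg;
 *
 *	if ((pg = pgfind(pgid)) != NULL) {
 *		... use pg ...
 *		pgrel(pg);
 *	}
 */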
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		if ((np = pfindn(savepid)) == NULL || np != p) {
			error = ESRCH;
			goto fatal;
		}
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			lwkt_gettoken(&p->p_token);
			p->p_flags &= ~P_CONTROLT;
			lwkt_reltoken(&p->p_token);
		} else {
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
		}
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
		spin_lock(&pghash_spin);
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		spin_unlock(&pghash_spin);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);		/* manual pgref */
		pgrel(opgrp);		/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}
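
/*
 * Illustrative sketch only: the two typical flavors of enterpgrp() call.
 * A setsid(2)-style path creates a new group and session keyed on the
 * caller's own pid (the KASSERTs above require pgid == p->p_pid for a
 * new pgrp, and that p not already be a session leader):
 *
 *	error = enterpgrp(curproc, curproc->p_pid, 1);	// new pgrp+session
 *
 * A setpgid(2)-style path joins or creates a group without a session:
 *
 *	error = enterpgrp(p, pgid, 0);
 */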
/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sp)
{
	struct tty *tp;
	int count;

	for (;;) {
		count = sp->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&tty_token);
			if (atomic_cmpset_int(&sp->s_count, 1, 0))
				break;
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sp->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and tty_token is held.
	 */
	if (sp->s_ttyp && sp->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sp->s_ttyp->t_session == sp);
		sp->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sp->s_ttyp->t_session == sp)
			sp->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sp->s_ttyp) != NULL) {
		sp->s_ttyp = NULL;
		ttyunhold(tp);
	}
	kfree(sp, M_SESSION);
	lwkt_reltoken(&tty_token);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}
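
/*
 * Illustrative note on call ordering (this is what enterpgrp() does):
 * bump the new group's eligibility count before dropping the old one,
 * so a count shared by both edges cannot transiently reach 0 and
 * trigger a spurious orphanpg():
 *
 *	fixjobc(p, newpgrp, 1);		// entering: increment first
 *	fixjobc(p, oldpgrp, 0);		// leaving: decrement second
 */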
/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		get_mplock();
		random_offset = karc4random() % random_offset;
		rel_mplock();
	}

	lwkt_gettoken(&proc_token);
	p->p_pid = proc_getnewpid_locked(random_offset);
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
	lwkt_reltoken(&proc_token);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * The caller must hold proc_token.
 */
static
pid_t
proc_getnewpid_locked(int random_offset)
{
	static pid_t nextpid;
	static pid_t pidchecked;
	struct proc *p;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid = nextpid + 1 + random_offset;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = nextpid % PID_MAX;
		if (nextpid < 100)
			nextpid += 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;

		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 *
		 * NOTE: Processes in the midst of being forked may not
		 *	 yet have p_pgrp and p_pgrp->pg_session set up,
		 *	 so we have to check for NULL.
		 *
		 *	 Processes being torn down should be interlocked
		 *	 with proc_token prior to the clearing of their
		 *	 p_pgrp.
		 */
		p = LIST_FIRST(&allproc);
again:
		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
			while (p->p_pid == nextpid ||
			    (p->p_pgrp && p->p_pgrp->pg_id == nextpid) ||
			    (p->p_pgrp && p->p_session &&
			     p->p_session->s_sid == nextpid)) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p->p_pid > nextpid && pidchecked > p->p_pid)
				pidchecked = p->p_pid;
			if (p->p_pgrp &&
			    p->p_pgrp->pg_id > nextpid &&
			    pidchecked > p->p_pgrp->pg_id) {
				pidchecked = p->p_pgrp->pg_id;
			}
			if (p->p_pgrp && p->p_session &&
			    p->p_session->s_sid > nextpid &&
			    pidchecked > p->p_session->s_sid) {
				pidchecked = p->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p = LIST_FIRST(&zombproc);
			goto again;
		}
	}
	return(nextpid);
}
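
/*
 * Illustrative example of the pidchecked cache above: if the ids in use
 * include { 17, 20, 25 } and nextpid lands on 18, the scan accepts 18
 * and records pidchecked = 20, so pid 19 can subsequently be handed out
 * without rescanning the allproc and zombproc lists.
 */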
/*
 * Called from exit1 to remove a process from the allproc
 * list and move it to the zombie list.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	lwkt_gettoken(&proc_token);
	PSTALL(p, "reap1", 0);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	p->p_stat = SZOMB;
	lwkt_reltoken(&proc_token);
	dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	lwkt_gettoken(&proc_token);
	PSTALL(p, "reap2", 0);
	LIST_REMOVE(p, p_list);		/* off zombproc */
	LIST_REMOVE(p, p_sibling);
	p->p_pptr = NULL;
	lwkt_reltoken(&proc_token);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int limit = nprocs + ncpus;

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &allproc, p_list) {
		PHOLD(p);
		r = callback(p, data);
		PRELE(p);
		if (r < 0)
			break;
		if (--limit < 0)
			break;
	}
	lwkt_reltoken(&proc_token);
}
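
/*
 * Illustrative sketch only: a minimal allproc_scan() consumer.  The
 * callback runs with the process held and proc_token held; returning -1
 * terminates the scan early.
 *
 *	static int
 *	count_stopped_callback(struct proc *p, void *data)
 *	{
 *		int *countp = data;
 *
 *		if (p->p_stat == SSTOP)
 *			++*countp;
 *		return (0);	// keep scanning; return -1 to stop
 *	}
 *
 *	int n = 0;
 *	allproc_scan(count_stopped_callback, &n);
 */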
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &allproc, p_list) {
		PHOLD(p);
		lwkt_gettoken(&p->p_token);
		FOREACH_LWP_IN_PROC(lp, p) {
			LWPHOLD(lp);
			r = callback(lp, data);
			LWPRELE(lp);
		}
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		if (r < 0)
			break;
	}
	lwkt_reltoken(&proc_token);
}

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * No requirements.
 * The callback is made with the process held and proc_token held.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;

	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &zombproc, p_list) {
		PHOLD(p);
		r = callback(p, data);
		PRELE(p);
		if (r < 0)
			break;
	}
	lwkt_reltoken(&proc_token);
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			kprintf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				kprintf("\tpgrp %p, pgid %ld, sess %p, "
					"sesscnt %d, mem %p\n",
					(void *)pgrp, (long)pgrp->pg_id,
					(void *)pgrp->pg_session,
					pgrp->pg_session->s_count,
					(void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					kprintf("\t\tpid %ld addr %p pgrp %p\n",
						(long)p->p_pid, (void *)p,
						(void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	lwkt_gettoken_shared(&proc_token);
	LIST_FOREACH(p, &zombproc, p_list) {
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&proc_token);
	return (NULL);
}
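
/*
 * Illustrative sketch only: like pfind(), zpfind() returns a held
 * process that must be released with PRELE():
 *
 *	struct proc *z;
 *
 *	if ((z = zpfind(pid)) != NULL) {
 *		... inspect the zombie ...
 *		PRELE(z);
 *	}
 */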
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * We need to output at least the proc, even if there is no lwp.
	 */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}
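
/*
 * Illustrative userland sketch only (not part of this file): a single
 * process can be queried through the KERN_PROC_PID node serviced by the
 * handler below; with KERN_PROC_FLAG_LWP or'd into the third mib word,
 * one kinfo_proc is returned per lwp instead.
 *
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0)
 *		... kp now describes the calling process ...
 */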
/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct proclist *plist;
	struct thread *td;
	struct thread *marker;
	int doingzomb, flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			if (PRISON_CHECK(cr1, p->p_ucred))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}
	for (doingzomb = 0; doingzomb <= 1; doingzomb++) {
		if (doingzomb)
			plist = &zombproc;
		else
			plist = &allproc;

		lwkt_gettoken_shared(&proc_token);

		LIST_FOREACH(p, plist, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
				continue;
			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0])
					continue;
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0])
					continue;
				break;
			}

			if (!PRISON_CHECK(cr1, p->p_ucred))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&proc_token);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if ((smp_active_mask & CPUMASK(nid)) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req,
								doingzomb);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	return (error);
}
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr && (pa = p->p_args) != NULL) {
		refcount_acquire(&pa->ar_ref);
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
		if (refcount_release(&pa->ar_ref))
			kfree(pa, M_PARGS);
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
	sysctl_kern_proc_cwd, "Process current working directory");
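
/*
 * Illustrative userland sketch only: because kern.proc.args is writable
 * for a process's own entry (see sysctl_kern_proc_args() above), a
 * setproctitle(3)-style facility can be layered on top of it.  The title
 * string here is hypothetical:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, getpid() };
 *	const char *title = "mydaemon: idle";
 *
 *	sysctl(mib, 4, NULL, NULL, __DECONST(void *, title),
 *	       strlen(title) + 1);
 */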