/*
 * (MPSAFE)
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 * $FreeBSD: src/sys/kern/kern_proc.c,v 1.63.2.9 2003/05/08 07:47:16 kbyanc Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");
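
/*
 * Usage sketch (editorial illustration): both knobs above are runtime
 * tunables, so a root shell can tighten visibility with sysctl(8), e.g.:
 *
 *	sysctl security.ps_showallprocs=0
 *	sysctl security.ps_showallthreads=0
 *
 * which hides other users' processes and kernel threads from unprivileged
 * ps(1)/sysctl(3) consumers via the checks in sysctl_kern_proc() below.
 */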

static void pgdelete(struct pgrp *);
static void orphanpg(struct pgrp *pg);
static pid_t proc_getnewpid_locked(int random_offset);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;

/*
 * Random component to nextpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity check the modulus value
 * to avoid doing it in critical paths.  Don't let it be too small or we
 * pointlessly waste entropy, and don't let it be impossibly large.  Using
 * a modulus that is too big causes a LOT more process table scans and
 * slows down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
        int error, pid;

        pid = randompid;
        error = sysctl_handle_int(oidp, &pid, 0, req);
        if (error || !req->newptr)
                return (error);
        if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
                pid = PID_MAX - 100;
        else if (pid < 2)                       /* NOP */
                pid = 0;
        else if (pid < 100)                     /* Make it reasonable */
                pid = 100;
        randompid = pid;
        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
            0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * Called from the low level boot code only.
 */
void
procinit(void)
{
        LIST_INIT(&allproc);
        LIST_INIT(&zombproc);
        lwkt_init();
        pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
        pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
        uihashinit();
}

/*
 * Process hold/release support functions.  These functions must be MPSAFE.
 * Called via the PHOLD(), PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB      0x20000000
#define PLOCK_WAITING   0x40000000
#define PLOCK_MASK      0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
        int o;
        int n;

        for (;;) {
                o = p->p_lock;
                cpu_ccfence();
                if ((o & PLOCK_MASK) <= count)
                        break;
                n = o | PLOCK_WAITING;
                tsleep_interlock(&p->p_lock, 0);

                /*
                 * If someone is trying to single-step the process during
                 * an exec or an exit they can deadlock us because procfs
                 * sleeps with the process held.
                 */
                if (p->p_stops) {
                        if (p->p_flags & P_INEXEC) {
                                wakeup(&p->p_stype);
                        } else if (p->p_flags & P_POSTEXIT) {
                                spin_lock(&p->p_spin);
                                p->p_stops = 0;
                                p->p_step = 0;
                                spin_unlock(&p->p_spin);
                                wakeup(&p->p_stype);
                        }
                }

                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
                }
        }
}

void
phold(struct proc *p)
{
        atomic_add_int(&p->p_lock, 1);
}
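
/*
 * Editor's illustrative sketch (compiled out): the p_lock scheme above,
 * transliterated to C11 atomics as a userland refcount.  The names
 * (xhold/xrele/xstall, XLOCK_*) are invented for illustration; the kernel
 * version blocks in tsleep()/wakeup() instead of spinning.
 */
#if 0
#include <sched.h>
#include <stdatomic.h>

#define XLOCK_WAITING   0x40000000
#define XLOCK_MASK      0x1FFFFFFF

static void
xhold(atomic_int *lk)
{
        atomic_fetch_add(lk, 1);
}

static void
xrele(atomic_int *lk)
{
        int o, n;

        for (;;) {
                o = atomic_load(lk);
                n = (o - 1) & ~XLOCK_WAITING;
                if (atomic_compare_exchange_weak(lk, &o, n)) {
                        if (o & XLOCK_WAITING)
                                ;       /* kernel: wakeup(lk) */
                        break;
                }
        }
}

/* Wait for the hold count to drop to <= count (kernel: pstall()). */
static void
xstall(atomic_int *lk, int count)
{
        int o;

        while (((o = atomic_load(lk)) & XLOCK_MASK) > count) {
                /* advertise a waiter, then back off */
                atomic_compare_exchange_weak(lk, &o, o | XLOCK_WAITING);
                sched_yield();  /* kernel: tsleep_interlock()/tsleep() */
        }
}
#endif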

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, 1, 0))
                return;

        /*
         * Slow path
         */
        for (;;) {
                o = p->p_lock;
                KKASSERT((o & PLOCK_MASK) > 0);
                cpu_ccfence();
                n = (o - 1) & ~PLOCK_WAITING;
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        if (o & PLOCK_WAITING)
                                wakeup(&p->p_lock);
                        break;
                }
        }
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
                return(0);

        /*
         * Slow path
         */
        for (;;) {
                o = p->p_lock;
                cpu_ccfence();
                if ((o & PLOCK_ZOMB) == 0) {
                        n = (o + 1) | PLOCK_ZOMB;
                        if (atomic_cmpset_int(&p->p_lock, o, n))
                                return(0);
                } else {
                        KKASSERT((o & PLOCK_MASK) > 0);
                        n = o | PLOCK_WAITING;
                        tsleep_interlock(&p->p_lock, 0);
                        if (atomic_cmpset_int(&p->p_lock, o, n)) {
                                tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
                                /* (p) can be ripped out at this point */
                                return(1);
                        }
                }
        }
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
                return;

        /*
         * Slow path
         */
        KKASSERT(p->p_lock & PLOCK_ZOMB);
        for (;;) {
                o = p->p_lock;
                KKASSERT((o & PLOCK_MASK) > 0);
                cpu_ccfence();
                n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        if (o & PLOCK_WAITING)
                                wakeup(&p->p_lock);
                        break;
                }
        }
}
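
/*
 * Usage sketch (editorial illustration): a reaper cannot hold (p) while
 * pholdzomb() blocks, so a failed attempt forces revalidation under the
 * parent's p_token before retrying:
 *
 *	while (pholdzomb(p)) {
 *		... we slept without a ref; (p) may already be gone, so
 *		... re-lookup the child under the parent's p_token.
 *	}
 *	... (p) is now held with PLOCK_ZOMB serialized; reap it and
 *	... finish with prelezomb(p).
 */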

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 * The caller must hold proc_token if the caller wishes a stable result.
 */
int
inferior(struct proc *p)
{
        lwkt_gettoken(&proc_token);
        while (p != curproc) {
                if (p->p_pid == 0) {
                        lwkt_reltoken(&proc_token);
                        return (0);
                }
                p = p->p_pptr;
        }
        lwkt_reltoken(&proc_token);
        return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
        struct proc *p = curproc;

        /*
         * Shortcut the current process
         */
        if (p && p->p_pid == pid) {
                PHOLD(p);
                return (p);
        }

        /*
         * Otherwise find it in the hash table.
         */
        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, PIDHASH(pid), p_hash) {
                if (p->p_pid == pid) {
                        PHOLD(p);
                        lwkt_reltoken(&proc_token);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_token);

        return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The caller should hold proc_token if the caller wishes a stable result.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
        struct proc *p = curproc;

        /*
         * Shortcut the current process
         */
        if (p && p->p_pid == pid)
                return (p);

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, PIDHASH(pid), p_hash) {
                if (p->p_pid == pid) {
                        lwkt_reltoken(&proc_token);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}

void
pgref(struct pgrp *pgrp)
{
        refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
        if (refcount_release(&pgrp->pg_refs))
                pgdelete(pgrp);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
        struct pgrp *pgrp;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
                if (pgrp->pg_id == pgid) {
                        refcount_acquire(&pgrp->pg_refs);
                        lwkt_reltoken(&proc_token);
                        return (pgrp);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}
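
/*
 * Usage sketch (editorial illustration): the PHOLD ref returned by pfind()
 * keeps (p) from being ripped out while it is inspected; this is the same
 * pattern sysctl_kern_proc_args() uses below:
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		lwkt_gettoken(&p->p_token);
 *		... inspect or modify (p) ...
 *		lwkt_reltoken(&p->p_token);
 *		PRELE(p);
 *	}
 */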

/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
        struct pgrp *pgrp;
        struct pgrp *opgrp;
        int error;

        pgrp = pgfind(pgid);

        KASSERT(pgrp == NULL || !mksess,
                ("enterpgrp: setsid into non-empty pgrp"));
        KASSERT(!SESS_LEADER(p),
                ("enterpgrp: session leader attempted setpgrp"));

        if (pgrp == NULL) {
                pid_t savepid = p->p_pid;
                struct proc *np;
                /*
                 * new process group
                 */
                KASSERT(p->p_pid == pgid,
                        ("enterpgrp: new pgrp and pid != pgid"));
                if ((np = pfindn(savepid)) == NULL || np != p) {
                        error = ESRCH;
                        goto fatal;
                }
                pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK);
                if (mksess) {
                        struct session *sess;

                        /*
                         * new session
                         */
                        sess = kmalloc(sizeof(struct session), M_SESSION,
                                       M_WAITOK);
                        sess->s_leader = p;
                        sess->s_sid = p->p_pid;
                        sess->s_count = 1;
                        sess->s_ttyvp = NULL;
                        sess->s_ttyp = NULL;
                        bcopy(p->p_session->s_login, sess->s_login,
                              sizeof(sess->s_login));
                        pgrp->pg_session = sess;
                        KASSERT(p == curproc,
                                ("enterpgrp: mksession and p != curproc"));
                        lwkt_gettoken(&p->p_token);
                        p->p_flags &= ~P_CONTROLT;
                        lwkt_reltoken(&p->p_token);
                } else {
                        pgrp->pg_session = p->p_session;
                        sess_hold(pgrp->pg_session);
                }
                pgrp->pg_id = pgid;
                LIST_INIT(&pgrp->pg_members);
                LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
                pgrp->pg_jobc = 0;
                SLIST_INIT(&pgrp->pg_sigiolst);
                lwkt_token_init(&pgrp->pg_token, "pgrp_token");
                refcount_init(&pgrp->pg_refs, 1);
                lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
        } else if (pgrp == p->p_pgrp) {
                pgrel(pgrp);
                goto done;
        } /* else pgfind() referenced the pgrp */

        /*
         * Adjust eligibility of affected pgrps to participate in job
         * control.  Increment eligibility counts before decrementing,
         * otherwise we could reach 0 spuriously during the first call.
         */
        lwkt_gettoken(&pgrp->pg_token);
        lwkt_gettoken(&p->p_token);
        fixjobc(p, pgrp, 1);
        fixjobc(p, p->p_pgrp, 0);
        while ((opgrp = p->p_pgrp) != NULL) {
                lwkt_gettoken(&opgrp->pg_token);
                LIST_REMOVE(p, p_pglist);
                p->p_pgrp = NULL;
                lwkt_reltoken(&opgrp->pg_token);
                pgrel(opgrp);
        }
        p->p_pgrp = pgrp;
        LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
        lwkt_reltoken(&p->p_token);
        lwkt_reltoken(&pgrp->pg_token);
done:
        error = 0;
fatal:
        return (error);
}

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
        struct pgrp *pg;

        lwkt_gettoken(&p->p_token);
        pg = p->p_pgrp;
        if (pg) {
                pgref(pg);
                lwkt_gettoken(&pg->pg_token);
                if (p->p_pgrp == pg) {
                        p->p_pgrp = NULL;
                        LIST_REMOVE(p, p_pglist);
                        pgrel(pg);
                }
                lwkt_reltoken(&pg->pg_token);
                lwkt_reltoken(&p->p_token);     /* avoid chaining on rel */
                pgrel(pg);
        } else {
                lwkt_reltoken(&p->p_token);
        }
        return (0);
}

/*
 * Delete a process group.  Must be called only after the last ref has been
 * released.
 */
static void
pgdelete(struct pgrp *pgrp)
{
        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pgid.
         */
        funsetownlst(&pgrp->pg_sigiolst);

        if (pgrp->pg_session->s_ttyp != NULL &&
            pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
                pgrp->pg_session->s_ttyp->t_pgrp = NULL;
        LIST_REMOVE(pgrp, pg_hash);
        sess_rele(pgrp->pg_session);
        kfree(pgrp, M_PGRP);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
        lwkt_gettoken(&tty_token);
        ++sp->s_count;
        lwkt_reltoken(&tty_token);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sp)
{
        struct tty *tp;

        KKASSERT(sp->s_count > 0);
        lwkt_gettoken(&tty_token);
        if (--sp->s_count == 0) {
                if (sp->s_ttyp && sp->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
                        /* FULL CLOSE, see ttyclearsession() */
                        KKASSERT(sp->s_ttyp->t_session == sp);
                        sp->s_ttyp->t_session = NULL;
#else
                        /* HALF CLOSE, see ttyclearsession() */
                        if (sp->s_ttyp->t_session == sp)
                                sp->s_ttyp->t_session = NULL;
#endif
                }
                if ((tp = sp->s_ttyp) != NULL) {
                        sp->s_ttyp = NULL;
                        ttyunhold(tp);
                }
                kfree(sp, M_SESSION);
        }
        lwkt_reltoken(&tty_token);
}
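
/*
 * Editorial sketch of how the two entry styles map onto enterpgrp():
 * a setsid(2)-style caller creates a new group and session keyed by its
 * own pid, while a setpgid(2)-style caller joins or creates a group only:
 *
 *	error = enterpgrp(p, p->p_pid, 1);	... new session + group
 *	error = enterpgrp(p, pgid, 0);		... join/create group
 *
 * The KASSERTs above enforce the corresponding invariants: mksess implies
 * an empty (new) group, and a session leader may not re-enter setpgrp.
 */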

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 *
 *	entering == 0 => p is leaving specified group.
 *	entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
        struct pgrp *hispgrp;
        struct session *mysession;
        struct proc *np;

        /*
         * Check p's parent to see whether p qualifies its own process
         * group; if so, adjust count for p's process group.
         */
        lwkt_gettoken(&p->p_token);     /* p_children scan */
        lwkt_gettoken(&pgrp->pg_token);

        mysession = pgrp->pg_session;
        if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
            hispgrp->pg_session == mysession) {
                if (entering)
                        pgrp->pg_jobc++;
                else if (--pgrp->pg_jobc == 0)
                        orphanpg(pgrp);
        }

        /*
         * Check this process' children to see whether they qualify
         * their process groups; if so, adjust counts for children's
         * process groups.
         */
        LIST_FOREACH(np, &p->p_children, p_sibling) {
                PHOLD(np);
                lwkt_gettoken(&np->p_token);
                if ((hispgrp = np->p_pgrp) != pgrp &&
                    hispgrp->pg_session == mysession &&
                    np->p_stat != SZOMB) {
                        pgref(hispgrp);
                        lwkt_gettoken(&hispgrp->pg_token);
                        if (entering)
                                hispgrp->pg_jobc++;
                        else if (--hispgrp->pg_jobc == 0)
                                orphanpg(hispgrp);
                        lwkt_reltoken(&hispgrp->pg_token);
                        pgrel(hispgrp);
                }
                lwkt_reltoken(&np->p_token);
                PRELE(np);
        }
        KKASSERT(pgrp->pg_refs > 0);
        lwkt_reltoken(&pgrp->pg_token);
        lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
        struct proc *p;

        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                if (p->p_stat == SSTOP) {
                        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                                ksignal(p, SIGHUP);
                                ksignal(p, SIGCONT);
                        }
                        return;
                }
        }
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
        int random_offset;

        if ((random_offset = randompid) != 0) {
                get_mplock();
                random_offset = karc4random() % random_offset;
                rel_mplock();
        }

        lwkt_gettoken(&proc_token);
        p->p_pid = proc_getnewpid_locked(random_offset);
        LIST_INSERT_HEAD(&allproc, p, p_list);
        LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
        lwkt_reltoken(&proc_token);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * The caller must hold proc_token.
 */
static
pid_t
proc_getnewpid_locked(int random_offset)
{
        static pid_t nextpid;
        static pid_t pidchecked;
        struct proc *p;

        /*
         * Find an unused process ID.  We remember a range of unused IDs
         * ready to use (from nextpid+1 through pidchecked-1).
         */
        nextpid = nextpid + 1 + random_offset;
retry:
        /*
         * If the process ID prototype has wrapped around,
         * restart somewhat above 0, as the low-numbered procs
         * tend to include daemons that don't exit.
         */
        if (nextpid >= PID_MAX) {
                nextpid = nextpid % PID_MAX;
                if (nextpid < 100)
                        nextpid += 100;
                pidchecked = 0;
        }
        if (nextpid >= pidchecked) {
                int doingzomb = 0;

                pidchecked = PID_MAX;

                /*
                 * Scan the active and zombie procs to check whether this
                 * pid is in use.  Remember the lowest pid that's greater
                 * than nextpid, so we can avoid checking for a while.
                 *
                 * NOTE: Processes in the midst of being forked may not
                 *	 have p_pgrp and p_pgrp->pg_session set up yet,
                 *	 so we have to check for NULL.
                 *
                 *	 Processes being torn down should be interlocked
                 *	 with proc_token prior to the clearing of their
                 *	 p_pgrp.
                 */
                p = LIST_FIRST(&allproc);
again:
                for (; p != NULL; p = LIST_NEXT(p, p_list)) {
                        while (p->p_pid == nextpid ||
                            (p->p_pgrp && p->p_pgrp->pg_id == nextpid) ||
                            (p->p_pgrp && p->p_session &&
                             p->p_session->s_sid == nextpid)) {
                                nextpid++;
                                if (nextpid >= pidchecked)
                                        goto retry;
                        }
                        if (p->p_pid > nextpid && pidchecked > p->p_pid)
                                pidchecked = p->p_pid;
                        if (p->p_pgrp &&
                            p->p_pgrp->pg_id > nextpid &&
                            pidchecked > p->p_pgrp->pg_id) {
                                pidchecked = p->p_pgrp->pg_id;
                        }
                        if (p->p_pgrp && p->p_session &&
                            p->p_session->s_sid > nextpid &&
                            pidchecked > p->p_session->s_sid) {
                                pidchecked = p->p_session->s_sid;
                        }
                }
                if (!doingzomb) {
                        doingzomb = 1;
                        p = LIST_FIRST(&zombproc);
                        goto again;
                }
        }
        return(nextpid);
}
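
/*
 * Worked example (editorial illustration) of the nextpid/pidchecked cache
 * above, ignoring the random offset: suppose nextpid advances to 615 and
 * the scan finds pids/pgids/sids 615, 622 and 700 in use.  The collision
 * bumps nextpid to 616, and the scan records pidchecked = 622, the lowest
 * in-use id above nextpid.  The next five forks then hand out 617..621
 * without scanning at all; only when nextpid reaches 622 does the
 * "nextpid >= pidchecked" test force another walk of allproc and zombproc.
 */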

/*
 * Called from exit1 to remove a process from the allproc
 * list and move it to the zombie list.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
        lwkt_gettoken(&proc_token);
        PSTALL(p, "reap1", 0);
        LIST_REMOVE(p, p_list);
        LIST_INSERT_HEAD(&zombproc, p, p_list);
        LIST_REMOVE(p, p_hash);
        p->p_stat = SZOMB;
        lwkt_reltoken(&proc_token);
        dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
        lwkt_gettoken(&proc_token);
        PSTALL(p, "reap2", 0);
        LIST_REMOVE(p, p_list); /* off zombproc */
        LIST_REMOVE(p, p_sibling);
        p->p_pptr = NULL;
        lwkt_reltoken(&proc_token);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
        struct proc *p = lp->lwp_proc;

        if (lp->lwp_mpflags & LWP_MP_VNLRU) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
                allocvnode_gc();
        }
        if (lp->lwp_mpflags & LWP_MP_WEXIT) {
                lwkt_gettoken(&p->p_token);
                lwp_exit(0);
                lwkt_reltoken(&p->p_token);     /* NOT REACHED */
        }
}

/*
 * Kernel threads running on behalf of user processes can also accumulate
 * deferred actions which need to be acted upon.  Callers include:
 *
 *	nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
        struct lwp *lp = curthread->td_lwp;

        if (lp) {
                if (lp->lwp_mpflags & LWP_MP_VNLRU) {
                        atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
                        allocvnode_gc();
                }
        }
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;
        int limit = nprocs + ncpus;

        /*
         * proc_token protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &allproc, p_list) {
                PHOLD(p);
                r = callback(p, data);
                PRELE(p);
                if (r < 0)
                        break;
                if (--limit < 0)
                        break;
        }
        lwkt_reltoken(&proc_token);
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
        struct proc *p;
        struct lwp *lp;
        int r = 0;

        /*
         * proc_token protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &allproc, p_list) {
                PHOLD(p);
                FOREACH_LWP_IN_PROC(lp, p) {
                        LWPHOLD(lp);
                        r = callback(lp, data);
                        LWPRELE(lp);
                }
                PRELE(p);
                if (r < 0)
                        break;
        }
        lwkt_reltoken(&proc_token);
}
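
/*
 * Editor's illustrative sketch (compiled out): a minimal allproc_scan()
 * callback.  The callback runs with (p) held and proc_token held, so it
 * may inspect fields that proc_token stabilizes; returning -1 terminates
 * the scan early.  The names below are invented for illustration.
 */
#if 0
static int
count_stopped_callback(struct proc *p, void *data)
{
        int *countp = data;

        if (p->p_stat == SSTOP)
                ++*countp;
        return (0);     /* keep scanning; return -1 to stop */
}

/* caller: int n = 0; allproc_scan(count_stopped_callback, &n); */
#endif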

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &zombproc, p_list) {
                PHOLD(p);
                r = callback(p, data);
                PRELE(p);
                if (r < 0)
                        break;
        }
        lwkt_reltoken(&proc_token);
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
        struct pgrp *pgrp;
        struct proc *p;
        int i;

        for (i = 0; i <= pgrphash; i++) {
                if (!LIST_EMPTY(&pgrphashtbl[i])) {
                        kprintf("\tindx %d\n", i);
                        LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
                                kprintf(
                        "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
                                    (void *)pgrp, (long)pgrp->pg_id,
                                    (void *)pgrp->pg_session,
                                    pgrp->pg_session->s_count,
                                    (void *)LIST_FIRST(&pgrp->pg_members));
                                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                                        kprintf("\t\tpid %ld addr %p pgrp %p\n",
                                            (long)p->p_pid, (void *)p,
                                            (void *)p->p_pgrp);
                                }
                        }
                }
        }
}
#endif /* DDB */

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
        struct proc *p;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &zombproc, p_list) {
                if (p->p_pid == pid) {
                        PHOLD(p);
                        lwkt_reltoken(&proc_token);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;
        struct lwp *lp;
        int skp = 0, had_output = 0;
        int error;

        bzero(&ki, sizeof(ki));
        lwkt_gettoken(&p->p_token);
        fill_kinfo_proc(p, &ki);
        if ((flags & KERN_PROC_FLAG_LWP) == 0)
                skp = 1;
        error = 0;
        FOREACH_LWP_IN_PROC(lp, p) {
                LWPHOLD(lp);
                fill_kinfo_lwp(lp, &ki.kp_lwp);
                had_output = 1;
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
                LWPRELE(lp);
                if (error)
                        break;
                if (skp)
                        break;
        }
        lwkt_reltoken(&p->p_token);
        /* We need to output at least the proc, even if there is no lwp. */
        if (had_output == 0) {
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
        }
        return (error);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;

        fill_kinfo_proc_kthread(td, &ki);
        return (SYSCTL_OUT(req, &ki, sizeof(ki)));
}
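
/*
 * Editor's illustrative sketch (compiled out): the userland view of the
 * handlers below, fetching a single kinfo_proc via the {CTL_KERN,
 * KERN_PROC, KERN_PROC_PID, pid} MIB.  The header providing struct
 * kinfo_proc varies between the BSDs; <sys/user.h> is assumed here to
 * match the include list at the top of this file.  Error handling elided.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>

static int
fetch_kinfo(pid_t pid, struct kinfo_proc *kp)
{
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, (int)pid };
        size_t len = sizeof(*kp);

        return (sysctl(mib, 4, kp, &len, NULL, 0));
}
#endif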

/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        int oid = oidp->oid_number;
        u_int namelen = arg2;
        struct proc *p;
        struct proclist *plist;
        struct thread *td;
        struct thread *marker;
        int doingzomb, flags = 0;
        int error = 0;
        int n;
        int origcpu;
        struct ucred *cr1 = curproc->p_ucred;

        flags = oid & KERN_PROC_FLAGMASK;
        oid &= ~KERN_PROC_FLAGMASK;

        if ((oid == KERN_PROC_ALL && namelen != 0) ||
            (oid != KERN_PROC_ALL && namelen != 1)) {
                return (EINVAL);
        }

        /*
         * proc_token protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        lwkt_gettoken(&proc_token);
        if (oid == KERN_PROC_PID) {
                p = pfindn((pid_t)name[0]);
                if (p == NULL)
                        goto post_threads;
                if (!PRISON_CHECK(cr1, p->p_ucred))
                        goto post_threads;
                PHOLD(p);
                error = sysctl_out_proc(p, req, flags);
                PRELE(p);
                goto post_threads;
        }

        if (!req->oldptr) {
                /* overestimate by 5 procs */
                error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
                if (error)
                        goto post_threads;
        }
        for (doingzomb = 0; doingzomb <= 1; doingzomb++) {
                if (doingzomb)
                        plist = &zombproc;
                else
                        plist = &allproc;
                LIST_FOREACH(p, plist, p_list) {
                        /*
                         * Show a user only their processes.
                         */
                        if ((!ps_showallprocs) &&
                            p_trespass(cr1, p->p_ucred))
                                continue;
                        /*
                         * Skip embryonic processes.
                         */
                        if (p->p_stat == SIDL)
                                continue;
                        /*
                         * TODO - make more efficient (see notes below).
                         * do by session.
                         */
                        switch (oid) {
                        case KERN_PROC_PGRP:
                                /* could do this by traversing pgrp */
                                if (p->p_pgrp == NULL ||
                                    p->p_pgrp->pg_id != (pid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_TTY:
                                if ((p->p_flags & P_CONTROLT) == 0 ||
                                    p->p_session == NULL ||
                                    p->p_session->s_ttyp == NULL ||
                                    dev2udev(p->p_session->s_ttyp->t_dev) !=
                                        (udev_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_UID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_uid != (uid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_RUID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_ruid != (uid_t)name[0])
                                        continue;
                                break;
                        }

                        if (!PRISON_CHECK(cr1, p->p_ucred))
                                continue;
                        PHOLD(p);
                        error = sysctl_out_proc(p, req, flags);
                        PRELE(p);
                        if (error)
                                goto post_threads;
                }
        }

        /*
         * Iterate over all active cpus and scan their thread list.  Start
         * with the next logical cpu and end with our original cpu.  We
         * migrate our own thread to each target cpu in order to safely scan
         * its thread list.  In the last loop we migrate back to our original
         * cpu.
         */
        origcpu = mycpu->gd_cpuid;
        if (!ps_showallthreads || jailed(cr1))
                goto post_threads;

        marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
        marker->td_flags = TDF_MARKER;
        error = 0;

        for (n = 1; n <= ncpus; ++n) {
                globaldata_t rgd;
                int nid;

                nid = (origcpu + n) % ncpus;
                if ((smp_active_mask & CPUMASK(nid)) == 0)
                        continue;
                rgd = globaldata_find(nid);
                lwkt_setcpu_self(rgd);

                crit_enter();
                TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

                while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
                        TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
                        TAILQ_INSERT_BEFORE(td, marker, td_allq);
                        if (td->td_flags & TDF_MARKER)
                                continue;
                        if (td->td_proc)
                                continue;

                        lwkt_hold(td);
                        crit_exit();

                        switch (oid) {
                        case KERN_PROC_PGRP:
                        case KERN_PROC_TTY:
                        case KERN_PROC_UID:
                        case KERN_PROC_RUID:
                                break;
                        default:
                                error = sysctl_out_proc_kthread(td, req,
                                                                doingzomb);
                                break;
                        }
                        lwkt_rele(td);
                        crit_enter();
                        if (error)
                                break;
                }
                TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
                crit_exit();

                if (error)
                        break;
        }
        kfree(marker, M_TEMP);

post_threads:
        lwkt_reltoken(&proc_token);
        return (error);
}
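
/*
 * Editorial note on the marker technique above: because the loop body can
 * block (lwkt_hold()/crit_exit() around SYSCTL_OUT), a plain TAILQ_FOREACH
 * over gd_tdallq could be invalidated mid-scan.  Instead the TDF_MARKER
 * node is inserted at the tail and walked toward the head:
 *
 *	td = TAILQ_PREV(marker, lwkt_queue, td_allq);  ... next victim
 *	TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
 *	TAILQ_INSERT_BEFORE(td, marker, td_allq);      ... save position
 *
 * so the marker, not a possibly-freed thread pointer, records the scan
 * position across any blocking in the callback.
 */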

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct proc *p;
        struct pargs *opa;
        struct pargs *pa;
        int error = 0;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (p == NULL)
                goto done;
        lwkt_gettoken(&p->p_token);

        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                goto done;

        if (req->newptr && curproc != p) {
                error = EPERM;
                goto done;
        }
        if (req->oldptr && (pa = p->p_args) != NULL) {
                refcount_acquire(&pa->ar_ref);
                error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
                if (refcount_release(&pa->ar_ref))
                        kfree(pa, M_PARGS);
        }
        if (req->newptr == NULL)
                goto done;

        if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
                goto done;
        }

        pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
        refcount_init(&pa->ar_ref, 1);
        pa->ar_length = req->newlen;
        error = SYSCTL_IN(req, pa->ar_args, req->newlen);
        if (error) {
                kfree(pa, M_PARGS);
                goto done;
        }

        /*
         * Replace p_args with the new pa.  p_args may have previously
         * been NULL.
         */
        opa = p->p_args;
        p->p_args = pa;

        if (opa) {
                KKASSERT(opa->ar_ref > 0);
                if (refcount_release(&opa->ar_ref)) {
                        kfree(opa, M_PARGS);
                        /* opa = NULL; */
                }
        }
done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct proc *p;
        int error = 0;
        char *fullpath, *freepath;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (p == NULL)
                goto done;
        lwkt_gettoken(&p->p_token);

        /*
         * If we are not allowed to see other args, we certainly shouldn't
         * get the cwd either.  Also check the usual trespassing.
         */
        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                goto done;

        if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
                struct nchandle nch;

                cache_copy(&p->p_fd->fd_ncdir, &nch);
                error = cache_fullpath(p, &nch, NULL,
                                       &fullpath, &freepath, 0);
                cache_drop(&nch);
                if (error)
                        goto done;
                error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
                kfree(freepath, M_TEMP);
        }

done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
        0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
        sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
        sysctl_kern_proc_cwd, "Process current working directory");
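
/*
 * Editor's illustrative sketch (compiled out): reading another process's
 * argument vector through sysctl_kern_proc_args() above.  The result is a
 * sequence of NUL-separated strings; buffer sizing and error handling are
 * simplified for illustration.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
fetch_args(pid_t pid, char *buf, size_t *lenp)
{
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, (int)pid };

        return (sysctl(mib, 4, buf, lenp, NULL, 0));
}
#endif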