/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_fork.c	7.25 (Berkeley) 03/17/91
 */

#include "param.h"
#include "systm.h"
#include "map.h"
#include "filedesc.h"
#include "kernel.h"
#include "malloc.h"
#include "proc.h"
#include "resourcevar.h"
#include "vnode.h"
#include "seg.h"
#include "file.h"
#include "acct.h"
#include "ktrace.h"

/* ARGSUSED */
fork(p, uap, retval)
	struct proc *p;
	struct args *uap;
	int retval[];
{

	return (fork1(p, 0, retval));
}

/* ARGSUSED */
vfork(p, uap, retval)
	struct proc *p;
	struct args *uap;
	int retval[];
{

	return (fork1(p, 1, retval));
}

int	nprocs = 1;		/* process 0 */
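
/*
 * Common code for fork and vfork (isvfork is nonzero for vfork).
 * The system-call return values are passed back through retval[]:
 * retval[0] is the child's pid in the parent and the parent's pid
 * in the child; retval[1] is 0 in the parent and 1 in the child.
 */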
fork1(p1, isvfork, retval)
	register struct proc *p1;
	int isvfork, retval[];
{
	register struct proc *p2;
	register int count, uid;
	static int nextpid, pidchecked = 0;

	count = 0;
	if ((uid = p1->p_ucred->cr_uid) != 0) {
		for (p2 = allproc; p2; p2 = p2->p_nxt)
			if (p2->p_ucred->cr_uid == uid)
				count++;
		for (p2 = zombproc; p2; p2 = p2->p_nxt)
			if (p2->p_ucred->cr_uid == uid)
				count++;
	}
	/*
	 * Although process entries are dynamically allocated,
	 * we still keep a global limit on the maximum number
	 * we will create.  Don't allow a nonprivileged user
	 * to exceed its current limit or to bring us within one
	 * of the global limit; don't let root exceed the limit.
	 * nprocs is the current number of processes,
	 * maxproc is the limit.
	 */
	retval[1] = 0;
	if (nprocs >= maxproc || uid != 0 && nprocs >= maxproc - 1) {
		tablefull("proc");
		return (EAGAIN);
	}
	if (count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)
		return (EAGAIN);

	/*
	 * Find an unused process ID.
	 * We remember a range of unused IDs ready to use
	 * (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = allproc;
again:
		for (; p2 != NULL; p2 = p2->p_nxt) {
			if (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = zombproc;
			goto again;
		}
	}

	/*
	 * Allocate new proc.
	 * Link onto allproc (this should probably be delayed).
	 */
	MALLOC(p2, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
	nprocs++;
	p2->p_nxt = allproc;
	p2->p_nxt->p_prev = &p2->p_nxt;		/* allproc is never NULL */
	p2->p_prev = &allproc;
	allproc = p2;
	p2->p_link = NULL;			/* shouldn't be necessary */
	p2->p_rlink = NULL;			/* shouldn't be necessary */

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
	    M_SUBPROC, M_WAITOK);
	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
	crhold(p1->p_ucred);

	p2->p_fd = fdcopy(p1);
	p2->p_stats = p1->p_stats;	/* XXX move; in u. */
	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}
	p2->p_sigacts = p1->p_sigacts;	/* XXX move; in u. */

	p2->p_flag = SLOAD | (p1->p_flag & (SPAGV|SHPUX));
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & SCTTY)
		p2->p_flag |= SCTTY;
	if (isvfork)
		p2->p_flag |= SPPWAIT;
	p2->p_stat = SIDL;
	p2->p_pid = nextpid;
	{
	struct proc **hash = &pidhash[PIDHASH(p2->p_pid)];

	p2->p_hash = *hash;
	*hash = p2;
	}
	p2->p_pgrpnxt = p1->p_pgrpnxt;
	p1->p_pgrpnxt = p2;
	p2->p_pptr = p1;
	p2->p_osptr = p1->p_cptr;
	if (p1->p_cptr)
		p1->p_cptr->p_ysptr = p2;
	p1->p_cptr = p2;
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag&KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	p2->p_regs = p1->p_regs;	/* XXX move this */
#if defined(tahoe)
	p2->p_vmspace->p_ckey = p1->p_vmspace->p_ckey;	/* XXX move this */
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	p1->p_flag |= SKEEP;
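	/*
	 * vm_fork builds the child's address space (sharing the parent's
	 * in the vfork case) and returns in both processes: nonzero in
	 * the child and zero in the parent, which is how the code below
	 * tells them apart.
	 */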
	if (vm_fork(p1, p2, isvfork)) {
		/*
		 * Child process.  Set start time, return parent pid,
		 * and mark as child in retval[1].
		 */
		(void) splclock();
		p2->p_stats->p_start = time;
		(void) spl0();
		retval[0] = p1->p_pid;
		retval[1] = 1;
		p2->p_acflag = AFORK;
		return (0);
	}

	/*
	 * Make child runnable and add to run queue.
	 */
	(void) splhigh();
	p2->p_stat = SRUN;
	setrq(p2);
	(void) spl0();

	/*
	 * Now can be swapped.
	 */
	p1->p_flag &= ~SKEEP;

	/*
	 * XXX preserve synchronization semantics of vfork.
	 * If waiting for child to exec or exit, set SPPWAIT
	 * on child, and sleep on our proc (in case of exit).
	 */
	if (isvfork)
		while (p2->p_flag & SPPWAIT)
			sleep((caddr_t)p1, PZERO - 1);

	/*
	 * Return child pid to parent process.
	 * retval[1] was set above.
	 */
	retval[0] = p2->p_pid;
	return (0);
}