/*	kern_proc.c	3.19	07/13/80	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/mtpr.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/buf.h"
#include "../h/reg.h"
#include "../h/inode.h"
#include "../h/seg.h"
#include "../h/acct.h"
#include "/usr/include/wait.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/text.h"
#include "../h/psl.h"
#include "../h/limit.h"

/*
 * exec system call, with and without environments.
 */
struct execa {
        char    *fname;
        char    **argp;
        char    **envp;
};

exec()
{
        ((struct execa *)u.u_ap)->envp = NULL;
        exece();
}

exece()
{
        register nc;
        register char *cp;
        register struct buf *bp;
        register struct execa *uap;
        int na, ne, ucp, ap, c;
        struct inode *ip;
        swblk_t bno;

        if ((ip = namei(uchar, 0)) == NULL)
                return;
        bno = 0;
        bp = 0;
        if(access(ip, IEXEC))
                goto bad;
        if((ip->i_mode & IFMT) != IFREG ||
           (ip->i_mode & (IEXEC|(IEXEC>>3)|(IEXEC>>6))) == 0) {
                u.u_error = EACCES;
                goto bad;
        }
        /*
         * Collect arguments on "file" in swap space.
         */
        na = 0;
        ne = 0;
        nc = 0;
        uap = (struct execa *)u.u_ap;
        if ((bno = malloc(argmap, ctod(clrnd((int) btoc(NCARGS))))) == 0) {
                swkill(u.u_procp, "exece");
                goto bad;
        }
        if (bno % CLSIZE)
                panic("execa malloc");
        if (uap->argp) for (;;) {
                ap = NULL;
                if (uap->argp) {
                        ap = fuword((caddr_t)uap->argp);
                        uap->argp++;
                }
                if (ap==NULL && uap->envp) {
                        uap->argp = NULL;
                        if ((ap = fuword((caddr_t)uap->envp)) == NULL)
                                break;
                        uap->envp++;
                        ne++;
                }
                if (ap==NULL)
                        break;
                na++;
                if(ap == -1)
                        u.u_error = EFAULT;
                do {
                        if (nc >= NCARGS-1)
                                u.u_error = E2BIG;
                        if ((c = fubyte((caddr_t)ap++)) < 0)
                                u.u_error = EFAULT;
                        if (u.u_error) {
                                if (bp)
                                        brelse(bp);
                                bp = 0;
                                goto badarg;
                        }
                        if ((nc&BMASK) == 0) {
                                if (bp)
                                        bdwrite(bp);
                                bp = getblk(argdev,
                                    (daddr_t)(dbtofsb(bno)+(nc>>BSHIFT)));
                                cp = bp->b_un.b_addr;
                        }
                        nc++;
                        *cp++ = c;
                } while (c>0);
        }
        if (bp)
                bdwrite(bp);
        bp = 0;
        nc = (nc + NBPW-1) & ~(NBPW-1);
        if (getxfile(ip, nc) || u.u_error) {
badarg:
                for (c = 0; c < nc; c += BSIZE)
                        if (bp = baddr(argdev, dbtofsb(bno)+(c>>BSHIFT))) {
                                bp->b_flags |= B_AGE;           /* throw away */
                                bp->b_flags &= ~B_DELWRI;       /* cancel io */
                                brelse(bp);
                                bp = 0;
                        }
                goto bad;
        }

        /*
         * copy back arglist
         */

        ucp = USRSTACK - nc - NBPW;
        ap = ucp - na*NBPW - 3*NBPW;
        u.u_ar0[SP] = ap;
        (void) suword((caddr_t)ap, na-ne);
        nc = 0;
        for (;;) {
                ap += NBPW;
                if (na==ne) {
                        (void) suword((caddr_t)ap, 0);
                        ap += NBPW;
                }
                if (--na < 0)
                        break;
                (void) suword((caddr_t)ap, ucp);
                do {
                        if ((nc&BMASK) == 0) {
                                if (bp)
                                        brelse(bp);
                                bp = bread(argdev,
                                    (daddr_t)(dbtofsb(bno)+(nc>>BSHIFT)));
                                bp->b_flags |= B_AGE;           /* throw away */
                                bp->b_flags &= ~B_DELWRI;       /* cancel io */
                                cp = bp->b_un.b_addr;
                        }
                        (void) subyte((caddr_t)ucp++, (c = *cp++));
                        nc++;
                } while(c&0377);
        }
        (void) suword((caddr_t)ap, 0);
        (void) suword((caddr_t)ucp, 0);
        setregs();
bad:
        if (bp)
                brelse(bp);
        if (bno)
                mfree(argmap, ctod(clrnd((int) btoc(NCARGS))), bno);
        iput(ip);
}
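
/*
 * A sketch, derived from the "copy back arglist" loop above, of the image
 * exece() leaves at the top of the new user stack (lowest address first;
 * na counts all pointers fetched, ne just the environment ones):
 *
 *	sp  ->	na-ne				(argument count)
 *		na-ne argument pointers
 *		0
 *		ne environment pointers
 *		0
 *	ucp ->	argument strings, then environment strings,
 *		each null terminated, followed by a zero word
 *		<- USRSTACK
 *
 * The 3*NBPW in the computation of ap covers the count word and the
 * two terminating zero words.
 */
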
/*
 * Read in and set up memory for executed file.
 * Zero return is normal;
 * non-zero means only the text is being replaced
 */
getxfile(ip, nargc)
register struct inode *ip;
{
        register sep;
        register size_t ts, ds, ss;
        register int overlay;
        int pagi = 0;

        /*
         * read in first few bytes
         * of file for segment
         * sizes:
         * ux_mag = 407/410/411/405
         *  407 is plain executable
         *  410 is RO text
         *  411 is separated ID
         *  405 is overlaid text
         *  412 is demand paged plain executable (NOT IMPLEMENTED)
         *  413 is demand paged RO text
         */

        u.u_base = (caddr_t)&u.u_exdata;
        u.u_count = sizeof(u.u_exdata);
        u.u_offset = 0;
        u.u_segflg = 1;
        readi(ip);
        u.u_segflg = 0;
        if(u.u_error)
                goto bad;
        if (u.u_count!=0) {
                u.u_error = ENOEXEC;
                goto bad;
        }
        sep = 0;
        overlay = 0;
        switch (u.u_exdata.ux_mag) {

        case 0405:
                overlay++;
                break;

        case 0412:
                u.u_error = ENOEXEC;
                goto bad;

        case 0407:
                u.u_exdata.ux_dsize += u.u_exdata.ux_tsize;
                u.u_exdata.ux_tsize = 0;
                break;

        case 0413:
                pagi = SPAGI;
                /* fall into ... */

        case 0410:
                if (u.u_exdata.ux_tsize == 0) {
                        u.u_error = ENOEXEC;
                        goto bad;
                }
                break;

        case 0411:
                u.u_error = ENOEXEC;
                goto bad;

        default:
                u.u_error = ENOEXEC;
                goto bad;
        }
        if(u.u_exdata.ux_tsize!=0 && (ip->i_flag&ITEXT)==0 && ip->i_count!=1) {
                u.u_error = ETXTBSY;
                goto bad;
        }

        /*
         * find text and data sizes
         * try them out for possible
         * exceed of max sizes
         */

        ts = clrnd(btoc(u.u_exdata.ux_tsize));
        ds = clrnd(btoc((u.u_exdata.ux_dsize+u.u_exdata.ux_bsize)));
        ss = clrnd(SSIZE + btoc(nargc));
        if (overlay) {
                if ((u.u_procp->p_flag & SPAGI) ||
                    u.u_sep==0 && ctos(ts) != ctos(u.u_tsize) || nargc) {
                        u.u_error = ENOMEM;
                        goto bad;
                }
                ds = u.u_dsize;
                ss = u.u_ssize;
                sep = u.u_sep;
                xfree();
                xalloc(ip, pagi);
                u.u_ar0[PC] = u.u_exdata.ux_entloc + 2; /* skip over entry mask */
        } else {
                if (chksize(ts, ds, ss))
                        goto bad;
                u.u_cdmap = zdmap;
                u.u_csmap = zdmap;
                if (swpexpand(ds, ss, &u.u_cdmap, &u.u_csmap) == NULL)
                        goto bad;

                /*
                 * At this point, committed to the new image!
                 * Release virtual memory resources of old process, and
                 * initialize the virtual memory of the new process.
                 * If we resulted from vfork(), instead wakeup our
                 * parent who will set SVFDONE when he has taken back
                 * our resources.
                 */
                u.u_prof.pr_scale = 0;
                if ((u.u_procp->p_flag & SVFORK) == 0)
                        vrelvm();
                else {
                        u.u_procp->p_flag &= ~SVFORK;
                        u.u_procp->p_flag |= SKEEP;
                        wakeup((caddr_t)u.u_procp);
                        while ((u.u_procp->p_flag & SVFDONE) == 0)
                                sleep((caddr_t)u.u_procp, PZERO - 1);
                        u.u_procp->p_flag &= ~(SVFDONE|SKEEP);
                }
                u.u_procp->p_flag &= ~(SPAGI|SANOM|SUANOM);
                u.u_procp->p_flag |= pagi;
                u.u_dmap = u.u_cdmap;
                u.u_smap = u.u_csmap;
                vgetvm(ts, ds, ss);

                if (pagi == 0) {
                        /*
                         * Read in data segment.
                         */
                        u.u_base = (char *)ctob(ts);
                        u.u_offset = sizeof(u.u_exdata)+u.u_exdata.ux_tsize;
                        u.u_count = u.u_exdata.ux_dsize;
                        readi(ip);
                }
                xalloc(ip, pagi);
                if (pagi && u.u_procp->p_textp)
                        vinifod((struct fpte *)dptopte(u.u_procp, 0),
                            PG_FTEXT, u.u_procp->p_textp->x_iptr,
                            1 + ts/CLSIZE, (int)btoc(u.u_exdata.ux_dsize));

                /* THIS SHOULD BE DONE AT A LOWER LEVEL, IF AT ALL */
                mtpr(TBIA, 0);

                /*
                 * set SUID/SGID protections, if no tracing
                 */
                if ((u.u_procp->p_flag&STRC)==0) {
                        if(ip->i_mode&ISUID)
                                if(u.u_uid != 0) {
                                        u.u_uid = ip->i_uid;
                                        u.u_procp->p_uid = ip->i_uid;
                                }
                        if(ip->i_mode&ISGID)
                                u.u_gid = ip->i_gid;
                } else
                        psignal(u.u_procp, SIGTRAP);
        }
        u.u_tsize = ts;
        u.u_dsize = ds;
        u.u_ssize = ss;
        u.u_sep = sep;
bad:
        return(overlay);
}

/*
 * Clear registers on exec
 */
setregs()
{
        register int (**rp)();
        register i;
        long sigmask;

        for(rp = &u.u_signal[0], sigmask = 1L; rp < &u.u_signal[NSIG];
            sigmask <<= 1, rp++) {
                switch (*rp) {

                case SIG_IGN:
                case SIG_DFL:
                case SIG_HOLD:
                        continue;

                default:
                        /*
                         * Normal or deferring catch; revert to default.
                         */
                        (void) spl6();
                        *rp = SIG_DFL;
                        if ((int)*rp & 1)
                                u.u_procp->p_siga0 |= sigmask;
                        else
                                u.u_procp->p_siga0 &= ~sigmask;
                        if ((int)*rp & 2)
                                u.u_procp->p_siga1 |= sigmask;
                        else
                                u.u_procp->p_siga1 &= ~sigmask;
                        (void) spl0();
                        continue;
                }
        }
        /*
        for(rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
                *rp++ = 0;
        */
        u.u_ar0[PC] = u.u_exdata.ux_entloc + 2; /* skip over entry mask */
        for(i=0; i<NOFILE; i++) {
                if (u.u_pofile[i]&EXCLOSE) {
                        closef(u.u_ofile[i]);
                        u.u_ofile[i] = NULL;
                        u.u_pofile[i] &= ~EXCLOSE;
                }
        }
        /*
         * Remember file name for accounting.
         */
        u.u_acflag &= ~AFORK;
        bcopy((caddr_t)u.u_dbuf, (caddr_t)u.u_comm, DIRSIZ);
}

/*
 * exit system call:
 * pass back caller's arg
 */
rexit()
{
        register struct a {
                int     rval;
        } *uap;

        uap = (struct a *)u.u_ap;
        exit((uap->rval & 0377) << 8);
}

/*
 * Release resources.
 * Save u. area for parent to look at.
 * Enter zombie state.
 * Wake up parent and init processes,
 * and dispose of children.
 */
exit(rv)
{
        register int i;
        register struct proc *p, *q;
        register struct file *f;
        register int x;

#ifdef PGINPROF
        vmsizmon();
#endif
        p = u.u_procp;
        p->p_flag &= ~(STRC|SULOCK);
        p->p_flag |= SWEXIT;
        p->p_clktim = 0;
        (void) spl6();
        if ((int)SIG_IGN & 1)
                p->p_siga0 = ~0;
        else
                p->p_siga0 = 0;
        if ((int)SIG_IGN & 2)
                p->p_siga1 = ~0;
        else
                p->p_siga1 = 0;
        (void) spl0();
        p->p_aveflt = 0;
        for(i=0; i<NSIG; i++)
                u.u_signal[i] = SIG_IGN;
        /*
         * Release virtual memory. If we resulted from
         * a vfork(), instead give the resources back to
         * the parent.
         */
        if ((p->p_flag & SVFORK) == 0)
                vrelvm();
        else {
                p->p_flag &= ~SVFORK;
                wakeup((caddr_t)p);
                while ((p->p_flag & SVFDONE) == 0)
                        sleep((caddr_t)p, PZERO - 1);
                p->p_flag &= ~SVFDONE;
        }
        for(i=0; i<NOFILE; i++) {
                f = u.u_ofile[i];
                u.u_ofile[i] = NULL;
                closef(f);
        }
        plock(u.u_cdir);
        iput(u.u_cdir);
        if (u.u_rdir) {
                plock(u.u_rdir);
                iput(u.u_rdir);
        }
        u.u_limit[LIM_FSIZE] = INFINITY;
        acct();
        vrelpt(u.u_procp);
        vrelu(u.u_procp, 0);
        multprog--;
/*      spl7();         /* clock will get mad because of overlaying */
        noproc = 1;
        p->p_stat = SZOMB;
        i = PIDHASH(p->p_pid);
        x = p - proc;
        if (pidhash[i] == x)
                pidhash[i] = p->p_idhash;
        else {
                for (i = pidhash[i]; i != 0; i = proc[i].p_idhash)
                        if (proc[i].p_idhash == x) {
                                proc[i].p_idhash = p->p_idhash;
                                goto done;
                        }
                panic("exit");
        }
done:
        ((struct xproc *)p)->xp_xstat = rv;             /* overlay */
        ((struct xproc *)p)->xp_vm = u.u_vm;            /* overlay */
        vmsadd(&((struct xproc *)p)->xp_vm, &u.u_cvm);
        for(q = &proc[0]; q < &proc[NPROC]; q++)
                if(q->p_pptr == p) {
                        q->p_pptr = &proc[1];
                        q->p_ppid = 1;
                        wakeup((caddr_t)&proc[1]);
                        /*
                         * Traced processes are killed
                         * since their existence means someone is screwing up.
                         * Stopped processes are sent a hangup and a continue.
                         * This is designed to be ``safe'' for setuid
                         * processes since they must be willing to tolerate
                         * hangups anyways.
                         */
                        if (q->p_flag&STRC) {
                                q->p_flag &= ~STRC;
                                psignal(q, SIGKILL);
                        } else if (q->p_stat == SSTOP) {
                                psignal(q, SIGHUP);
                                psignal(q, SIGCONT);
                        }
                        /*
                         * Protect this process from future
                         * tty signals, clear TSTP/TTIN/TTOU if pending,
                         * and set SDETACH bit on procs.
                         */
                        spgrp(q, -1);
                }
        wakeup((caddr_t)p->p_pptr);
        psignal(p->p_pptr, SIGCHLD);
        swtch();
}

wait()
{
        struct vtimes vm;
        struct vtimes *vp;

        if ((u.u_ar0[PS] & PSL_ALLCC) != PSL_ALLCC) {
                wait1(0, (struct vtimes *)0);
                return;
        }
        vp = (struct vtimes *)u.u_ar0[R1];
        wait1(u.u_ar0[R0], &vm);
        if (u.u_error)
                return;
        (void) copyout((caddr_t)&vm, (caddr_t)vp, sizeof (struct vtimes));
}
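
/*
 * Note on the status word handed back in r_val2 by wait1() below
 * (a summary of this file only, not of wait.h): a normally exiting
 * child reports its exit argument in the high byte, as packed by
 * rexit() above, while a stopped child reports the stopping signal
 * in the high byte and WSTOPPED in the low byte.
 */
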
/*
 * Wait system call.
 * Search for a terminated (zombie) child,
 * finally lay it to rest, and collect its status.
 * Look also for stopped (traced) children,
 * and pass back status from them.
 */
wait1(options, vp)
register options;
struct vtimes *vp;
{
        register f;
        register struct proc *p;

        f = 0;
loop:
        for(p = &proc[0]; p < &proc[NPROC]; p++)
                if(p->p_pptr == u.u_procp) {
                        f++;
                        if(p->p_stat == SZOMB) {
                                u.u_r.r_val1 = p->p_pid;
                                u.u_r.r_val2 = ((struct xproc *)p)->xp_xstat;
                                ((struct xproc *)p)->xp_xstat = 0;
                                if (vp)
                                        *vp = ((struct xproc *)p)->xp_vm;
                                vmsadd(&u.u_cvm, &((struct xproc *)p)->xp_vm);
                                ((struct xproc *)p)->xp_vm = zvms;
                                p->p_stat = NULL;
                                p->p_pid = 0;
                                p->p_ppid = 0;
                                p->p_pptr = 0;
                                p->p_sig = 0;
                                p->p_siga0 = 0;
                                p->p_siga1 = 0;
                                p->p_pgrp = 0;
                                p->p_flag = 0;
                                p->p_wchan = 0;
                                p->p_cursig = 0;
                                return;
                        }
                        if (p->p_stat == SSTOP && (p->p_flag&SWTED)==0 &&
                            (p->p_flag&STRC || options&WUNTRACED)) {
                                p->p_flag |= SWTED;
                                u.u_r.r_val1 = p->p_pid;
                                u.u_r.r_val2 = (p->p_cursig<<8) | WSTOPPED;
                                return;
                        }
                }
        if (f==0) {
                u.u_error = ECHILD;
                return;
        }
        if (options&WNOHANG) {
                u.u_r.r_val1 = 0;
                return;
        }
        /*
        if (setjmp(u.u_qsav)) {
                u.u_eosys = RESTARTSYS;
                return;
        }
        */
        sleep((caddr_t)u.u_procp, PWAIT);
        goto loop;
}

/*
 * fork system call.
 */
fork()
{

        u.u_cdmap = zdmap;
        u.u_csmap = zdmap;
        if (swpexpand(u.u_dsize, u.u_ssize, &u.u_cdmap, &u.u_csmap) == 0) {
                u.u_r.r_val2 = 0;
                return;
        }
        fork1(0);
}

fork1(isvfork)
{
        register struct proc *p1, *p2;
        register a;

        a = 0;
        p2 = NULL;
        for(p1 = &proc[0]; p1 < &proc[NPROC]; p1++) {
                if (p1->p_stat==NULL && p2==NULL)
                        p2 = p1;
                else {
                        if (p1->p_uid==u.u_uid && p1->p_stat!=NULL)
                                a++;
                }
        }
        /*
         * Disallow if
         *  No processes at all;
         *  not su and too many procs owned; or
         *  not su and would take last slot.
         */
        if (p2==NULL || (u.u_uid!=0 && (p2==&proc[NPROC-1] || a>MAXUPRC))) {
                u.u_error = EAGAIN;
                if (!isvfork) {
                        (void) vsexpand(0, &u.u_cdmap, 1);
                        (void) vsexpand(0, &u.u_csmap, 1);
                }
                goto out;
        }
        p1 = u.u_procp;
        if(newproc(isvfork)) {
                u.u_r.r_val1 = p1->p_pid;
                u.u_r.r_val2 = 1;       /* child */
                u.u_start = time;
                u.u_acflag = AFORK;
                return;
        }
        u.u_r.r_val1 = p2->p_pid;

out:
        u.u_r.r_val2 = 0;
}

/*
 * break system call.
 *  -- bad planning: "break" is a dirty word in C.
 */
sbreak()
{
        struct a {
                char    *nsiz;
        };
        register int n, d;

        /*
         * set n to new data size
         * set d to new-old
         */

        n = btoc(((struct a *)u.u_ap)->nsiz);
        if (!u.u_sep)
                n -= ctos(u.u_tsize) * stoc(1);
        if (n < 0)
                n = 0;
        d = clrnd(n - u.u_dsize);
        if (ctob(u.u_dsize+d) > u.u_limit[LIM_DATA]) {
                u.u_error = ENOMEM;
                return;
        }
        if (chksize(u.u_tsize, u.u_dsize+d, u.u_ssize))
                return;
        if (swpexpand(u.u_dsize+d, u.u_ssize, &u.u_dmap, &u.u_smap)==0)
                return;
        expand(d, P0BR);
}
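
/*
 * A minimal user-level sketch (hypothetical, not compiled as part of this
 * file) of how the fork/exec/wait calls implemented above are normally
 * combined.  execl() and the plain int status word are assumptions about
 * the C library of this era, not anything defined in this file.
 *
 *	int pid, status;
 *
 *	if ((pid = fork()) == 0) {
 *		execl("/bin/echo", "echo", "hello", (char *)0);
 *		_exit(0177);
 *	}
 *	while (wait(&status) != pid)
 *		continue;
 *
 * After the loop, the child's exit argument sits in the high byte of
 * status, as packed by rexit() above and returned by wait1().
 */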