/*	$OpenBSD: kern_exec.c,v 1.213 2020/02/15 09:35:48 anton Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

void	unveil_destroy(struct process *ps);

const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *, struct emul *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap size
 * added to the fixed stack position.  Must be a power of 2.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm;
	extern struct emul emul_native;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for(; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) {	/* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	memset(&arginfo, 0, sizeof(arginfo));

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/* Clear our unveil paths out so the child
		 * starts afresh
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
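				/* open stdin read-only; stdout and stderr read/write */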
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timespecclear(&pr->ps_timer[i].it_interval);
			timespecclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);

	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease unlocks p->p_fd. */
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, 0, SIGABRT, EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}

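/*
 * Copy argc and the argument/environment vectors to the new process's stack.
 * Layout, from the stack base upward: argc, the argv pointers plus a
 * terminating NULL, the envp pointers plus a terminating NULL, e_arglen
 * pointer-sized slots reserved for emulation data (e.g. the ELF auxiliary
 * vector), then the argument and environment strings themselves.  Returns
 * the address just past envp's terminating NULL, or NULL if a copyout fails.
 */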
void *
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (NULL);

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	return (cpp);
}

int
exec_sigcode_map(struct process *pr, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (e->e_sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off;
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}

		for (off = 0; off < round_page(sz); off += sigfillsiz)
			memcpy((caddr_t)va + off, sigfill, sigfillsiz);
		memcpy((caddr_t)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	pr->ps_sigcode = 0; /* no hint */
	uao_reference(e->e_sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode +
	    (pr->ps_emul->e_esigret - pr->ps_emul->e_sigcode);

	return (0);
}