/*	$OpenBSD: kern_exec.c,v 1.202 2018/10/30 03:27:45 deraadt Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

void	unveil_destroy(struct process *ps);

const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *, struct emul *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of two.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
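 *		(the returned vnode, epp->ep_vp, has been opened for reading;
 *		 sys_execve() vn_close()s it once the new image is set up.)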
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto bad1;
	}
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > p->p_rlimit[RLIMIT_DATA].rlim_cur))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
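	 * (unlike bad2 above, the vnode has not been opened yet, so it is
	 *  released with vput() instead of vn_close().)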
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm;
	extern struct emul emul_native;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
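	 * (EXEC_HASARGL and the ep_fa array come from the "#!" script
	 *  handler, which builds a replacement argument list of interpreter
	 *  path, optional interpreter argument and script name.)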
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for(; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) {	/* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
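	 * From this point on the old address space is being torn down, so
	 * errors can no longer simply return to the caller; they must go
	 * through exec_abort, which terminates the process.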
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
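	 * (PS_SUGIDEXEC is also set below for genuine set-id executables; it
	 *  keeps facilities such as ktrace and core dumps restricted for the
	 *  new image.)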
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/* Clear our unveil paths out so the child
		 * starts afresh
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOSUID has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup.
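	 * e_fixup gives the emulation a last chance to finish the image (the
	 * native ELF hook maps the run-time linker for dynamic binaries);
	 * e_setregs then points the registers at the new entry point and
	 * initial stack.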
	 */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		p->p_p->ps_flags |= PS_WXNEEDED;
	else
		p->p_p->ps_flags &= ~PS_WXNEEDED;

	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}


void *
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (NULL);

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them!
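	 * cpp walks the new user stack where the argv/envp pointer vectors
	 * are written, dp is the user-space string area just past those
	 * vectors, and sp walks the kernel-side copy built by sys_execve().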
	 */
	arginfo->ps_argvstr = cpp;	/* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	arginfo->ps_envstr = cpp;	/* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	return (cpp);
}

int
exec_sigcode_map(struct process *pr, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (e->e_sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off;
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}

		for (off = 0; off < round_page(sz); off += sigfillsiz)
			memcpy((caddr_t)va + off, sigfill, sigfillsiz);
		memcpy((caddr_t)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	pr->ps_sigcode = 0;	/* no hint */
	uao_reference(e->e_sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode +
	    (pr->ps_emul->e_esigret - pr->ps_emul->e_sigcode);

	return (0);
}