/*	$OpenBSD: kern_exec.c,v 1.219 2020/10/15 16:31:11 cheloha Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

#include <sys/timetc.h>

struct uvm_object *timekeep_object;
struct timekeep *timekeep;

void	unveil_destroy(struct process *ps);

const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *, struct emul *);

/*
 * Map the shared timekeep page.
 */
int exec_timekeep_map(struct process *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of 2.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit is on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm;
	extern struct emul emul_native;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for (; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;

	if (len > pack.ep_ssize) {	/* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
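	 * Once uvmspace_exec() returns, the old address space is gone, so any
	 * later failure must go through exec_abort (which terminates the
	 * process) rather than returning an error to the caller.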
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	memset(&arginfo, 0, sizeof(arginfo));

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/* Clear our unveil paths out so the child
		 * starts afresh
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		cancel_all_itimers();
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* map the process's timekeep page, needs to be before e_fixup */
	if (exec_timekeep_map(pr))
		goto free_pack_abort;

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);

	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease unlocks p->p_fd. */
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, 0, SIGABRT, EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}


void *
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (NULL);

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	return (cpp);
}

int
exec_sigcode_map(struct process *pr, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
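	 * The pages are first filled out to a page boundary with the sigfill
	 * pattern, and the sigcode itself is then copied in at offset 0.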
833 */ 834 if (e->e_sigobject == NULL) { 835 extern int sigfillsiz; 836 extern u_char sigfill[]; 837 size_t off; 838 vaddr_t va; 839 int r; 840 841 e->e_sigobject = uao_create(sz, 0); 842 uao_reference(e->e_sigobject); /* permanent reference */ 843 844 if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject, 845 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 846 MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) { 847 uao_detach(e->e_sigobject); 848 return (ENOMEM); 849 } 850 851 for (off = 0; off < round_page(sz); off += sigfillsiz) 852 memcpy((caddr_t)va + off, sigfill, sigfillsiz); 853 memcpy((caddr_t)va, e->e_sigcode, sz); 854 uvm_unmap(kernel_map, va, va + round_page(sz)); 855 } 856 857 pr->ps_sigcode = 0; /* no hint */ 858 uao_reference(e->e_sigobject); 859 if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz), 860 e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC, 861 PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY, 862 MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) { 863 uao_detach(e->e_sigobject); 864 return (ENOMEM); 865 } 866 867 /* Calculate PC at point of sigreturn entry */ 868 pr->ps_sigcoderet = pr->ps_sigcode + 869 (pr->ps_emul->e_esigret - pr->ps_emul->e_sigcode); 870 871 return (0); 872 } 873 874 int 875 exec_timekeep_map(struct process *pr) 876 { 877 size_t timekeep_sz = round_page(sizeof(struct timekeep)); 878 879 /* 880 * Similar to the sigcode object, except that there is a single 881 * timekeep object, and not one per emulation. 882 */ 883 if (timekeep_object == NULL) { 884 vaddr_t va = 0; 885 886 timekeep_object = uao_create(timekeep_sz, 0); 887 uao_reference(timekeep_object); 888 889 if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object, 890 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 891 MAP_INHERIT_SHARE, MADV_RANDOM, 0))) { 892 uao_detach(timekeep_object); 893 timekeep_object = NULL; 894 return (ENOMEM); 895 } 896 if (uvm_fault_wire(kernel_map, va, va + timekeep_sz, 897 PROT_READ | PROT_WRITE)) { 898 uvm_unmap(kernel_map, va, va + timekeep_sz); 899 uao_detach(timekeep_object); 900 timekeep_object = NULL; 901 return (ENOMEM); 902 } 903 904 timekeep = (struct timekeep *)va; 905 timekeep->tk_version = TK_VERSION; 906 } 907 908 pr->ps_timekeep = 0; /* no hint */ 909 uao_reference(timekeep_object); 910 if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz, 911 timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ, 912 MAP_INHERIT_COPY, MADV_RANDOM, 0))) { 913 uao_detach(timekeep_object); 914 return (ENOMEM); 915 } 916 917 return (0); 918 } 919