/*	$OpenBSD: kern_exec.c,v 1.248 2023/05/30 08:30:01 jsg Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

#include <sys/timetc.h>

struct uvm_object *sigobject;		/* shared sigcode object */
vaddr_t sigcode_va;
vsize_t sigcode_sz;
struct uvm_object *timekeep_object;
struct timekeep *timekeep;

void	unveil_destroy(struct process *ps);

const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *);

/*
 * Map the shared timekeep page.
 */
int exec_timekeep_map(struct process *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of 2.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	vaddr_t pc = PROC_PC(p);
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = p->p_vmspace;
	struct vnode *otvp;

	if (vm->vm_execve &&
	    (pc >= vm->vm_execve_end || pc < vm->vm_execve)) {
		printf("%s(%d): execve %lx outside %lx-%lx\n", pr->ps_comm,
		    pr->ps_pid, pc, vm->vm_execve, vm->vm_execve_end);
		p->p_p->ps_acflag |= AEXECVE;
		sigabort(p);
		return (0);
	}

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_args = NULL;
	pack.ep_auxinfo = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for (; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random == 0) {
		sgap = 0;
	} else {
		sgap = arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + ELF_AUX_WORDS) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 1);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, 0, TRUE, FALSE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, 0, TRUE, FALSE))
		goto exec_abort;
#endif

	memset(&arginfo, 0, sizeof(arginfo));

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!copyargs(&pack, &arginfo, stack, argp))
		goto exec_abort;

	pr->ps_auxinfo = (vaddr_t)pack.ep_auxinfo;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	strlcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, sizeof(pr->ps_comm));
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	/*
	 * XXX As a transition mechanism, we don't enforce branch
	 * target control flow integrity on partitions mounted with
	 * the wxallowed flag.
	 */
	if (pr->ps_textvp->v_mount &&
	    (pr->ps_textvp->v_mount->mnt_flag & MNT_WXALLOWED))
		pack.ep_flags |= EXEC_NOBTCFI;
	/* XXX XXX But enable it for chrome. */
	if (strcmp(p->p_p->ps_comm, "chrome") == 0)
		pack.ep_flags &= ~EXEC_NOBTCFI;

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/* Clear our unveil paths out so the child
		 * starts afresh
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOSUID has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		cancel_all_itimers();
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	memset(p->p_name, 0, sizeof p->p_name);

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	knote_locked(&pr->ps_klist, NOTE_EXEC);

	/* map the process's timekeep page, needs to be before exec_elf_fixup */
	if (exec_timekeep_map(pr))
		goto free_pack_abort;

	/* setup new registers and do misc. setup. */
	if (exec_elf_fixup(p, &pack) != 0)
		goto free_pack_abort;
#ifdef MACHINE_STACK_GROWS_UP
	setregs(p, &pack, (u_long)stack + slen, &arginfo);
#else
	setregs(p, &pack, (u_long)stack, &arginfo);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	/* setregs() sets up all the registers, so just 'return' */
	return EJUSTRETURN;

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease unlocks p->p_fd. */
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, 0, SIGABRT, EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}


int
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (0);

	dp = (char *) (cpp + argc + envc + 2 + ELF_AUX_WORDS);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	/* if this process needs auxinfo, note where to place it */
	if (pack->ep_args != NULL)
		pack->ep_auxinfo = cpp;

	return (1);
}

int
exec_sigcode_map(struct process *pr)
{
	extern char sigcode[], esigcode[], sigcoderet[];
	vsize_t sz;

	sz = (vaddr_t)esigcode - (vaddr_t)sigcode;

	/*
	 * If we don't have a sigobject yet, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it. Then we map
	 * it with PROT_EXEC into the process just the way sys_mmap would map it.
	 */
	if (sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off, left;
		vaddr_t va;
		int r;

		sigobject = uao_create(sz, 0);
		uao_reference(sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(sigobject);
			return (ENOMEM);
		}

		for (off = 0, left = round_page(sz); left != 0;
		    off += sigfillsiz) {
			size_t chunk = ulmin(left, sigfillsiz);
			memcpy((caddr_t)va + off, sigfill, chunk);
			left -= chunk;
		}
		memcpy((caddr_t)va, sigcode, sz);

		(void) uvm_map_protect(kernel_map, va, round_page(sz),
		    PROT_READ, 0, FALSE, FALSE);
		sigcode_va = va;
		sigcode_sz = round_page(sz);
	}

	pr->ps_sigcode = 0; /* no hint */
	uao_reference(sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    sigobject, 0, 0, UVM_MAPFLAG(PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) {
		uao_detach(sigobject);
		return (ENOMEM);
	}
	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_sigcode,
	    pr->ps_sigcode + round_page(sz), 1);

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode + (sigcoderet - sigcode);

	return (0);
}

int
exec_timekeep_map(struct process *pr)
{
	size_t timekeep_sz = round_page(sizeof(struct timekeep));

	/*
	 * Similar to the sigcode object
	 */
	if (timekeep_object == NULL) {
		vaddr_t va = 0;

		timekeep_object = uao_create(timekeep_sz, 0);
		uao_reference(timekeep_object);

		if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0))) {
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}
		if (uvm_fault_wire(kernel_map, va, va + timekeep_sz,
		    PROT_READ | PROT_WRITE)) {
			uvm_unmap(kernel_map, va, va + timekeep_sz);
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}

		timekeep = (struct timekeep *)va;
		timekeep->tk_version = TK_VERSION;
	}

	pr->ps_timekeep = 0; /* no hint */
	uao_reference(timekeep_object);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz,
	    timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ,
	    MAP_INHERIT_COPY, MADV_RANDOM, 0))) {
		uao_detach(timekeep_object);
		return (ENOMEM);
	}
	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_timekeep,
	    pr->ps_timekeep + timekeep_sz, 1);

	return (0);
}