xref: /openbsd-src/sys/kern/kern_exec.c (revision 8550894424f8a4aa4aafb6cd57229dd6ed7cd9dd)
1 /*	$OpenBSD: kern_exec.c,v 1.243 2023/01/13 23:02:43 kettenis Exp $	*/
2 /*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/
3 
4 /*-
5  * Copyright (C) 1993, 1994 Christopher G. Demetriou
6  * Copyright (C) 1992 Wolfgang Solfrank.
7  * Copyright (C) 1992 TooLs GmbH.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by TooLs GmbH.
21  * 4. The name of TooLs GmbH may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
25  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
29  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
30  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/filedesc.h>
39 #include <sys/proc.h>
40 #include <sys/mount.h>
41 #include <sys/malloc.h>
42 #include <sys/pool.h>
43 #include <sys/namei.h>
44 #include <sys/vnode.h>
45 #include <sys/fcntl.h>
46 #include <sys/file.h>
47 #include <sys/acct.h>
48 #include <sys/exec.h>
49 #include <sys/exec_elf.h>
50 #include <sys/ktrace.h>
51 #include <sys/resourcevar.h>
52 #include <sys/mman.h>
53 #include <sys/signalvar.h>
54 #include <sys/stat.h>
55 #include <sys/conf.h>
56 #include <sys/pledge.h>
57 #ifdef SYSVSHM
58 #include <sys/shm.h>
59 #endif
60 
61 #include <sys/syscallargs.h>
62 
63 #include <uvm/uvm_extern.h>
64 #include <machine/tcb.h>
65 
66 #include <sys/timetc.h>
67 
68 struct uvm_object *sigobject;		/* shared sigcode object */
69 vaddr_t sigcode_va;
70 vsize_t sigcode_sz;
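/*
 * Shared timekeep page, exported read-only into each process (see
 * exec_timekeep_map() below) so userland can read timecounter state
 * without a system call.
 */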
71 struct uvm_object *timekeep_object;
72 struct timekeep *timekeep;
73 
74 void	unveil_destroy(struct process *ps);
75 
76 const struct kmem_va_mode kv_exec = {
77 	.kv_wait = 1,
78 	.kv_map = &exec_map
79 };
80 
81 /*
82  * Map the shared signal code.
83  */
84 int exec_sigcode_map(struct process *);
85 
86 /*
87  * Map the shared timekeep page.
88  */
89 int exec_timekeep_map(struct process *);
90 
91 /*
92  * If non-zero, stackgap_random specifies the upper limit of the random gap size
93  * added to the fixed stack position.  Must be a power of two.
94  */
95 int stackgap_random = STACKGAP_RANDOM;
96 
97 /*
98  * check exec:
99  * given an "executable" described in the exec package's namei info,
100  * see what we can do with it.
101  *
102  * ON ENTRY:
103  *	exec package with appropriate namei info
104  *	proc pointer of exec'ing proc
105  *	NO SELF-LOCKED VNODES
106  *
107  * ON EXIT:
108  *	error:	nothing held, etc.  exec header still allocated.
109  *	ok:	filled exec package, one locked vnode.
110  *
111  * EXEC SWITCH ENTRY:
112  * 	Locked vnode to check, exec package, proc.
113  *
114  * EXEC SWITCH EXIT:
115  *	ok:	return 0, filled exec package, one locked vnode.
116  *	error:	destructive:
117  *			everything deallocated except exec header.
118  *		non-destructive:
119  *			error code, locked vnode, exec header unmodified
120  */
121 int
122 check_exec(struct proc *p, struct exec_package *epp)
123 {
124 	int error, i;
125 	struct vnode *vp;
126 	struct nameidata *ndp;
127 	size_t resid;
128 
129 	ndp = epp->ep_ndp;
130 	ndp->ni_cnd.cn_nameiop = LOOKUP;
131 	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
132 	if (epp->ep_flags & EXEC_INDIR)
133 		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
134 	/* first get the vnode */
135 	if ((error = namei(ndp)) != 0)
136 		return (error);
137 	epp->ep_vp = vp = ndp->ni_vp;
138 
139 	/* check for regular file */
140 	if (vp->v_type != VREG) {
141 		error = EACCES;
142 		goto bad1;
143 	}
144 
145 	/* get attributes */
146 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
147 		goto bad1;
148 
149 	/* Check mount point */
150 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
151 		error = EACCES;
152 		goto bad1;
153 	}
154 
155 	/* SUID programs may not be started with execpromises */
156 	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
157 	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
158 		error = EACCES;
159 		goto bad1;
160 	}
161 
162 	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
163 		epp->ep_vap->va_mode &= ~(VSUID | VSGID);
164 
165 	/* check access; even for root at least one exec bit must be set */
166 	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
167 		goto bad1;
168 	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
169 		error = EACCES;
170 		goto bad1;
171 	}
172 
173 	/* try to open it */
174 	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
175 		goto bad1;
176 
177 	/* unlock vp, we need it unlocked from here */
178 	VOP_UNLOCK(vp);
179 
180 	/* now we have the file, get the exec header */
181 	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
182 	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
183 	if (error)
184 		goto bad2;
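	/* ep_hdrvalid is the number of header bytes actually read */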
185 	epp->ep_hdrvalid = epp->ep_hdrlen - resid;
186 
187 	/*
188 	 * set up the vmcmds for creation of the process
189 	 * address space
190 	 */
191 	error = ENOEXEC;
192 	for (i = 0; i < nexecs && error != 0; i++) {
193 		int newerror;
194 
195 		if (execsw[i].es_check == NULL)
196 			continue;
197 		newerror = (*execsw[i].es_check)(p, epp);
198 		/* make sure the first "interesting" error code is saved. */
199 		if (!newerror || error == ENOEXEC)
200 			error = newerror;
201 		if (epp->ep_flags & EXEC_DESTR && error != 0)
202 			return (error);
203 	}
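	/*
	 * At this point error is 0 if some exec format claimed the image,
	 * ENOEXEC if none matched, or the first format-specific error.
	 */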
204 	if (!error) {
205 		/* check that entry point is sane */
206 		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
207 			error = ENOEXEC;
208 		}
209 
210 		/* check limits */
211 		if ((epp->ep_tsize > MAXTSIZ) ||
212 		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
213 			error = ENOMEM;
214 
215 		if (!error)
216 			return (0);
217 	}
218 
219 	/*
220 	 * free any vmspace-creation commands,
221 	 * and release their references
222 	 */
223 	kill_vmcmds(&epp->ep_vmcmds);
224 
225 bad2:
226 	/*
227 	 * close the vnode, free the pathname buf, and punt.
228 	 */
229 	vn_close(vp, FREAD, p->p_ucred, p);
230 	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
231 	return (error);
232 
233 bad1:
234 	/*
235 	 * free the namei pathname buffer, and put the vnode
236 	 * (which we don't yet have open).
237 	 */
238 	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
239 	vput(vp);
240 	return (error);
241 }
242 
243 /*
244  * exec system call
245  */
246 int
247 sys_execve(struct proc *p, void *v, register_t *retval)
248 {
249 	struct sys_execve_args /* {
250 		syscallarg(const char *) path;
251 		syscallarg(char *const *) argp;
252 		syscallarg(char *const *) envp;
253 	} */ *uap = v;
254 	int error;
255 	struct exec_package pack;
256 	struct nameidata nid;
257 	struct vattr attr;
258 	struct ucred *cred = p->p_ucred;
259 	char *argp;
260 	char * const *cpp, *dp, *sp;
261 #ifdef KTRACE
262 	char *env_start;
263 #endif
264 	struct process *pr = p->p_p;
265 	long argc, envc;
266 	size_t len, sgap, dstsize;
267 #ifdef MACHINE_STACK_GROWS_UP
268 	size_t slen;
269 #endif
270 	char *stack;
271 	struct ps_strings arginfo;
272 	struct vmspace *vm;
273 	struct vnode *otvp;
274 
275 	/* get other threads to stop */
276 	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
277 		return (error);
278 
279 	/*
280 	 * Cheap solution to complicated problems.
281 	 * Mark this process as "leave me alone, I'm execing".
282 	 */
283 	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);
284 
285 	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
286 	nid.ni_pledge = PLEDGE_EXEC;
287 	nid.ni_unveil = UNVEIL_EXEC;
288 
289 	/*
290 	 * initialize the fields of the exec package.
291 	 */
292 	pack.ep_name = (char *)SCARG(uap, path);
293 	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
294 	pack.ep_hdrlen = exec_maxhdrsz;
295 	pack.ep_hdrvalid = 0;
296 	pack.ep_ndp = &nid;
297 	pack.ep_interp = NULL;
298 	pack.ep_args = NULL;
299 	pack.ep_auxinfo = NULL;
300 	VMCMDSET_INIT(&pack.ep_vmcmds);
301 	pack.ep_vap = &attr;
302 	pack.ep_flags = 0;
303 
304 	/* see if we can run it. */
305 	if ((error = check_exec(p, &pack)) != 0) {
306 		goto freehdr;
307 	}
308 
309 	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */
310 
311 	/* allocate an argument buffer */
312 	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
313 #ifdef DIAGNOSTIC
314 	if (argp == NULL)
315 		panic("execve: argp == NULL");
316 #endif
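	/* &kd_waitok above means the allocation sleeps rather than fail */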
317 	dp = argp;
318 	argc = 0;
319 
320 	/*
321 	 * Copy the fake args list, if there's one, freeing it as we go.
322 	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
323 	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
324 	 */
325 	if (pack.ep_flags & EXEC_HASARGL) {
326 		dstsize = NCARGS;
327 		for (; pack.ep_fa[argc] != NULL; argc++) {
328 			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
329 			len++;
330 			dp += len; dstsize -= len;
331 			if (pack.ep_fa[argc+1] != NULL)
332 				free(pack.ep_fa[argc], M_EXEC, len);
333 			else
334 				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
335 		}
336 		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
337 		pack.ep_flags &= ~EXEC_HASARGL;
338 	}
339 
340 	/* Now get argv & environment */
341 	if (!(cpp = SCARG(uap, argp))) {
342 		error = EFAULT;
343 		goto bad;
344 	}
345 
346 	if (pack.ep_flags & EXEC_SKIPARG)
347 		cpp++;
348 
349 	while (1) {
350 		len = argp + ARG_MAX - dp;
351 		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
352 			goto bad;
353 		if (!sp)
354 			break;
355 		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
356 			if (error == ENAMETOOLONG)
357 				error = E2BIG;
358 			goto bad;
359 		}
360 		dp += len;
361 		cpp++;
362 		argc++;
363 	}
364 
365 	/* must have at least one argument */
366 	if (argc == 0) {
367 		error = EINVAL;
368 		goto bad;
369 	}
370 
371 #ifdef KTRACE
372 	if (KTRPOINT(p, KTR_EXECARGS))
373 		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
374 #endif
375 
376 	envc = 0;
377 	/* environment does not need to be there */
378 	if ((cpp = SCARG(uap, envp)) != NULL) {
379 #ifdef KTRACE
380 		env_start = dp;
381 #endif
382 		while (1) {
383 			len = argp + ARG_MAX - dp;
384 			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
385 				goto bad;
386 			if (!sp)
387 				break;
388 			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
389 				if (error == ENAMETOOLONG)
390 					error = E2BIG;
391 				goto bad;
392 			}
393 			dp += len;
394 			cpp++;
395 			envc++;
396 		}
397 
398 #ifdef KTRACE
399 		if (KTRPOINT(p, KTR_EXECENV))
400 			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
401 #endif
402 	}
403 
404 	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);
405 
406 	/*
407 	 * If we have enabled random stackgap, the stack itself has already
408 	 * been placed at a random location, but is still aligned to a page
409 	 * boundary.  Provide the lower bits of random placement now.
410 	 */
411 	if (stackgap_random == 0) {
412 		sgap = 0;
413 	} else {
414 		sgap = arc4random() & PAGE_MASK;
415 		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
416 	}
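	/*
	 * e.g. with a 4k page and _STACKALIGNBYTES == 15 (both vary by
	 * architecture), sgap becomes a 16-byte aligned value in
	 * [0, 4096], shifting ps_strings by a sub-page amount.
	 */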
417 
418 	/* Now check if args & environ fit into new stack */
419 	len = ((argc + envc + 2 + ELF_AUX_WORDS) * sizeof(char *) +
420 	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;
421 
422 	len = (len + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
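	/*
	 * len now covers the argv/envp pointer arrays and their NULL
	 * terminators, the auxv slots, the argc word, the copied strings
	 * (dp - argp), the random gap and struct ps_strings, rounded up
	 * to the stack alignment.
	 */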
423 
424 	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
425 		error = ENOMEM;
426 		goto bad;
427 	}
428 
429 	/* adjust "active stack depth" for process VSZ */
430 	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */
431 
432 	/*
433 	 * we're committed: any further errors will kill the process, so
434 	 * kill the other threads now.
435 	 */
436 	single_thread_set(p, SINGLE_EXIT, 1);
437 
438 	/*
439 	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
440 	 * ps_vmspace!
441 	 */
442 	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
443 
444 	vm = pr->ps_vmspace;
445 	/* Now map address space */
446 	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
447 	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
448 	    trunc_page(pack.ep_taddr));
449 	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
450 	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
451 	    trunc_page(pack.ep_daddr));
452 	vm->vm_dused = 0;
453 	vm->vm_ssize = atop(round_page(pack.ep_ssize));
454 	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
455 	vm->vm_minsaddr = (char *)pack.ep_minsaddr;
456 
457 	/* create the new process's VM space by running the vmcmds */
458 #ifdef DIAGNOSTIC
459 	if (pack.ep_vmcmds.evs_used == 0)
460 		panic("execve: no vmcmds");
461 #endif
462 	error = exec_process_vmcmds(p, &pack);
463 
464 	/* if an error happened, deallocate and punt */
465 	if (error)
466 		goto exec_abort;
467 
468 #ifdef MACHINE_STACK_GROWS_UP
469 	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
470 	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
471 	    trunc_page(pr->ps_strings), PROT_NONE, 0, TRUE, FALSE))
472 		goto exec_abort;
473 #else
474 	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
475 	if (uvm_map_protect(&vm->vm_map,
476 	    round_page(pr->ps_strings + sizeof(arginfo)),
477 	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, 0, TRUE, FALSE))
478 		goto exec_abort;
479 #endif
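	/*
	 * Either way, any whole pages of the random gap next to ps_strings
	 * become PROT_NONE, so they cannot be read, written or reused.
	 */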
480 
481 	memset(&arginfo, 0, sizeof(arginfo));
482 
483 	/* remember information about the process */
484 	arginfo.ps_nargvstr = argc;
485 	arginfo.ps_nenvstr = envc;
486 
487 #ifdef MACHINE_STACK_GROWS_UP
488 	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
489 	slen = len - sizeof(arginfo) - sgap;
490 #else
491 	stack = (char *)(vm->vm_minsaddr - len);
492 #endif
493 	/* Now copy argc, args & environ to new stack */
494 	if (!copyargs(&pack, &arginfo, stack, argp))
495 		goto exec_abort;
496 
497 	pr->ps_auxinfo = (vaddr_t)pack.ep_auxinfo;
498 
499 	/* copy out the process's ps_strings structure */
500 	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
501 		goto exec_abort;
502 
503 	stopprofclock(pr);	/* stop profiling */
504 	fdcloseexec(p);		/* handle close on exec */
505 	execsigs(p);		/* reset caught signals */
506 	TCB_SET(p, NULL);	/* reset the TCB address */
507 	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
508 	pr->ps_kbind_cookie = 0;
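	/* a fresh sigcookie invalidates pre-exec signal contexts */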
509 	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);
510 
511 	/* set command name & other accounting info */
512 	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
513 	strlcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, sizeof(pr->ps_comm));
514 	pr->ps_acflag &= ~AFORK;
515 
516 	/* record proc's vnode, for use by sysctl */
517 	otvp = pr->ps_textvp;
518 	vref(pack.ep_vp);
519 	pr->ps_textvp = pack.ep_vp;
520 	if (otvp)
521 		vrele(otvp);
522 
523 	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
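	/*
	 * If a vfork() parent is waiting on us (PS_PPWAIT), the exec ends
	 * the vfork sharing, so wake the parent now.
	 */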
524 	if (pr->ps_flags & PS_PPWAIT) {
525 		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
526 		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
527 		wakeup(pr->ps_pptr);
528 	}
529 
530 	/*
531 	 * If process does execve() while it has a mismatched real,
532 	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
533 	 */
534 	if (cred->cr_uid != cred->cr_ruid ||
535 	    cred->cr_uid != cred->cr_svuid ||
536 	    cred->cr_gid != cred->cr_rgid ||
537 	    cred->cr_gid != cred->cr_svgid)
538 		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
539 	else
540 		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);
541 
542 	if (pr->ps_flags & PS_EXECPLEDGE) {
543 		pr->ps_pledge = pr->ps_execpledge;
544 		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
545 	} else {
546 		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
547 		pr->ps_pledge = 0;
548 		/* XXX XXX XXX XXX */
549 		/* Clear our unveil paths so the new image
550 		 * starts afresh.
551 		 */
552 		unveil_destroy(pr);
553 		pr->ps_uvdone = 0;
554 	}
555 
556 	/*
557 	 * deal with set[ug]id.
558 	 * MNT_NOEXEC has already been used to disable s[ug]id.
559 	 */
560 	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
561 		int i;
562 
563 		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);
564 
565 #ifdef KTRACE
566 		/*
567 		 * If process is being ktraced, turn off - unless
568 		 * root set it.
569 		 */
570 		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
571 			ktrcleartrace(pr);
572 #endif
573 		p->p_ucred = cred = crcopy(cred);
574 		if (attr.va_mode & VSUID)
575 			cred->cr_uid = attr.va_uid;
576 		if (attr.va_mode & VSGID)
577 			cred->cr_gid = attr.va_gid;
578 
579 		/*
580 		 * For set[ug]id processes, a few caveats apply to
581 		 * stdin, stdout, and stderr.
582 		 */
583 		error = 0;
584 		fdplock(p->p_fd);
585 		for (i = 0; i < 3; i++) {
586 			struct file *fp = NULL;
587 
588 			/*
589 			 * NOTE - fd_getfile() never returns NULL here merely
590 			 * because of a larval (immature) fd: the descriptor
591 			 * table is not shared, since we are set[ug]id.
592 			 */
593 			fp = fd_getfile(p->p_fd, i);
594 
595 			/*
596 			 * Ensure that stdin, stdout, and stderr are already
597 			 * allocated.  We do not want userland to accidentally
598 			 * allocate descriptors in this range which has implied
599 			 * meaning to libc.
600 			 */
601 			if (fp == NULL) {
602 				short flags = FREAD | (i == 0 ? 0 : FWRITE);
603 				struct vnode *vp;
604 				int indx;
605 
606 				if ((error = falloc(p, &fp, &indx)) != 0)
607 					break;
608 #ifdef DIAGNOSTIC
609 				if (indx != i)
610 					panic("sys_execve: falloc indx != i");
611 #endif
612 				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
613 					fdremove(p->p_fd, indx);
614 					closef(fp, p);
615 					break;
616 				}
617 				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
618 					fdremove(p->p_fd, indx);
619 					closef(fp, p);
620 					vrele(vp);
621 					break;
622 				}
623 				if (flags & FWRITE)
624 					vp->v_writecount++;
625 				fp->f_flag = flags;
626 				fp->f_type = DTYPE_VNODE;
627 				fp->f_ops = &vnops;
628 				fp->f_data = (caddr_t)vp;
629 				fdinsert(p->p_fd, indx, 0, fp);
630 			}
631 			FRELE(fp, p);
632 		}
633 		fdpunlock(p->p_fd);
634 		if (error)
635 			goto exec_abort;
636 	} else
637 		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);
638 
639 	/*
640 	 * Reset the saved ugids and update the process's copy of the
641 	 * creds if the creds have been changed
642 	 */
643 	if (cred->cr_uid != cred->cr_svuid ||
644 	    cred->cr_gid != cred->cr_svgid) {
645 		/* make sure we have unshared ucreds */
646 		p->p_ucred = cred = crcopy(cred);
647 		cred->cr_svuid = cred->cr_uid;
648 		cred->cr_svgid = cred->cr_gid;
649 	}
650 
651 	if (pr->ps_ucred != cred) {
652 		struct ucred *ocred;
653 
654 		ocred = pr->ps_ucred;
655 		crhold(cred);
656 		pr->ps_ucred = cred;
657 		crfree(ocred);
658 	}
659 
660 	if (pr->ps_flags & PS_SUGIDEXEC) {
661 		cancel_all_itimers();
662 	}
663 
664 	/* reset CPU time usage for the thread, but not the process */
665 	timespecclear(&p->p_tu.tu_runtime);
666 	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
667 
668 	memset(p->p_name, 0, sizeof p->p_name);
669 
670 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
671 
672 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
673 	vn_close(pack.ep_vp, FREAD, cred, p);
674 
675 	/*
676 	 * notify others that we exec'd
677 	 */
678 	KNOTE(&pr->ps_klist, NOTE_EXEC);
679 
680 	/* map the process's timekeep page; must happen before exec_elf_fixup() */
681 	if (exec_timekeep_map(pr))
682 		goto free_pack_abort;
683 
684 	/* setup new registers and do misc. setup. */
685 	if (exec_elf_fixup(p, &pack) != 0)
686 		goto free_pack_abort;
687 #ifdef MACHINE_STACK_GROWS_UP
688 	setregs(p, &pack, (u_long)stack + slen, &arginfo);
689 #else
690 	setregs(p, &pack, (u_long)stack, &arginfo);
691 #endif
692 
693 	/* map the process's signal trampoline code */
694 	if (exec_sigcode_map(pr))
695 		goto free_pack_abort;
696 
697 #ifdef __HAVE_EXEC_MD_MAP
698 	/* perform machine-dependent mappings the process might need */
699 	if (exec_md_map(p, &pack))
700 		goto free_pack_abort;
701 #endif
702 
703 	if (pr->ps_flags & PS_TRACED)
704 		psignal(p, SIGTRAP);
705 
706 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
707 
708 	p->p_descfd = 255;
709 	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
710 		p->p_descfd = pack.ep_fd;
711 
712 	if (pack.ep_flags & EXEC_WXNEEDED)
713 		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
714 	else
715 		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
716 
717 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
718 	single_thread_clear(p, P_SUSPSIG);
719 
720 	/* setregs() sets up all the registers, so just 'return' */
721 	return EJUSTRETURN;
722 
723 bad:
724 	/* free the vmspace-creation commands, and release their references */
725 	kill_vmcmds(&pack.ep_vmcmds);
726 	/* kill any opened file descriptor, if necessary */
727 	if (pack.ep_flags & EXEC_HASFD) {
728 		pack.ep_flags &= ~EXEC_HASFD;
729 		fdplock(p->p_fd);
730 		/* fdrelease unlocks p->p_fd. */
731 		(void) fdrelease(p, pack.ep_fd);
732 	}
733 	if (pack.ep_interp != NULL)
734 		pool_put(&namei_pool, pack.ep_interp);
735 	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
736 	/* close and put the exec'd file */
737 	vn_close(pack.ep_vp, FREAD, cred, p);
738 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
739 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
740 
741 freehdr:
742 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
743 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
744 	single_thread_clear(p, P_SUSPSIG);
745 
746 	return (error);
747 
748 exec_abort:
749 	/*
750 	 * The old process image no longer exists, so exit gracefully:
751 	 * get rid of the (new) address space we have created, if any,
752 	 * release our namei data and vnode, and exit noting failure.
753 	 */
754 	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
755 	if (pack.ep_interp != NULL)
756 		pool_put(&namei_pool, pack.ep_interp);
757 	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
758 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
759 	vn_close(pack.ep_vp, FREAD, cred, p);
760 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
761 
762 free_pack_abort:
763 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
764 	exit1(p, 0, SIGABRT, EXIT_NORMAL);
765 
766 	/* NOTREACHED */
767 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
768 
769 	return (0);
770 }
771 
772 
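/*
 * Copy argc, the argv and envp pointer arrays, and the strings they
 * reference onto the new user stack, recording where argv and envp
 * land in *arginfo.  Returns 1 on success, 0 if a copyout fails.
 */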
773 int
774 copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
775     void *argp)
776 {
777 	char **cpp = stack;
778 	char *dp, *sp;
779 	size_t len;
780 	void *nullp = NULL;
781 	long argc = arginfo->ps_nargvstr;
782 	int envc = arginfo->ps_nenvstr;
783 
784 	if (copyout(&argc, cpp++, sizeof(argc)))
785 		return (0);
786 
787 	dp = (char *) (cpp + argc + envc + 2 + ELF_AUX_WORDS);
788 	sp = argp;
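	/*
	 * Layout built at cpp, growing upward:
	 *   argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL |
	 *   auxv slots | argument and environment strings
	 * sp walks the kernel copy of the strings, dp the user destination.
	 */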
789 
790 	/* XXX don't copy them out, remap them! */
791 	arginfo->ps_argvstr = cpp; /* remember location of argv for later */
792 
793 	for (; --argc >= 0; sp += len, dp += len)
794 		if (copyout(&dp, cpp++, sizeof(dp)) ||
795 		    copyoutstr(sp, dp, ARG_MAX, &len))
796 			return (0);
797 
798 	if (copyout(&nullp, cpp++, sizeof(nullp)))
799 		return (0);
800 
801 	arginfo->ps_envstr = cpp; /* remember location of envp for later */
802 
803 	for (; --envc >= 0; sp += len, dp += len)
804 		if (copyout(&dp, cpp++, sizeof(dp)) ||
805 		    copyoutstr(sp, dp, ARG_MAX, &len))
806 			return (0);
807 
808 	if (copyout(&nullp, cpp++, sizeof(nullp)))
809 		return (0);
810 
811 	/* if this process needs auxinfo, note where to place it */
812 	if (pack->ep_args != NULL)
813 		pack->ep_auxinfo = cpp;
814 
815 	return (1);
816 }
817 
818 int
819 exec_sigcode_map(struct process *pr)
820 {
821 	extern char sigcode[], esigcode[], sigcoderet[];
822 	vsize_t sz;
823 
824 	sz = (vaddr_t)esigcode - (vaddr_t)sigcode;
825 
826 	/*
827 	 * If we don't have a sigobject yet, create one.
828 	 *
829 	 * sigobject is an anonymous memory object (just like SYSV shared
830 	 * memory) that we keep a permanent reference to and that we map
831  * in all processes that need this sigcode.  The creation is simple:
832  * we create an object, add a permanent reference to it, map it into
833  * kernel space, copy the sigcode into it, and unmap it.  Then we map
834  * it with PROT_EXEC into the process, just as sys_mmap() would.
835 	 */
836 	if (sigobject == NULL) {
837 		extern int sigfillsiz;
838 		extern u_char sigfill[];
839 		size_t off, left;
840 		vaddr_t va;
841 		int r;
842 
843 		sigobject = uao_create(sz, 0);
844 		uao_reference(sigobject);	/* permanent reference */
845 
846 		if ((r = uvm_map(kernel_map, &va, round_page(sz), sigobject,
847 		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
848 		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
849 			uao_detach(sigobject);
850 			return (ENOMEM);
851 		}
852 
853 		for (off = 0, left = round_page(sz); left != 0;
854 		    off += sigfillsiz) {
855 			size_t chunk = ulmin(left, sigfillsiz);
856 			memcpy((caddr_t)va + off, sigfill, chunk);
857 			left -= chunk;
858 		}
859 		memcpy((caddr_t)va, sigcode, sz);
860 
861 		(void) uvm_map_protect(kernel_map, va, round_page(sz),
862 		    PROT_READ, 0, FALSE, FALSE);
863 		sigcode_va = va;
864 		sigcode_sz = round_page(sz);
865 	}
866 
867 	pr->ps_sigcode = 0; /* no hint */
868 	uao_reference(sigobject);
869 	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
870 	    sigobject, 0, 0, UVM_MAPFLAG(PROT_EXEC,
871 	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
872 	    MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) {
873 		uao_detach(sigobject);
874 		return (ENOMEM);
875 	}
876 	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_sigcode,
877 	    pr->ps_sigcode + round_page(sz), 1);
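	/*
	 * Mark the mapping immutable so the process cannot later unmap,
	 * remap or reprotect its signal trampoline.
	 */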
878 
879 	/* Calculate PC at point of sigreturn entry */
880 	pr->ps_sigcoderet = pr->ps_sigcode + (sigcoderet - sigcode);
881 
882 	return (0);
883 }
884 
885 int
886 exec_timekeep_map(struct process *pr)
887 {
888 	size_t timekeep_sz = round_page(sizeof(struct timekeep));
889 
890 	/*
891 	 * Similar to the sigcode object
892 	 */
893 	if (timekeep_object == NULL) {
894 		vaddr_t va = 0;
895 
896 		timekeep_object = uao_create(timekeep_sz, 0);
897 		uao_reference(timekeep_object);
898 
899 		if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object,
900 		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
901 		    MAP_INHERIT_SHARE, MADV_RANDOM, 0))) {
902 			uao_detach(timekeep_object);
903 			timekeep_object = NULL;
904 			return (ENOMEM);
905 		}
906 		if (uvm_fault_wire(kernel_map, va, va + timekeep_sz,
907 		    PROT_READ | PROT_WRITE)) {
908 			uvm_unmap(kernel_map, va, va + timekeep_sz);
909 			uao_detach(timekeep_object);
910 			timekeep_object = NULL;
911 			return (ENOMEM);
912 		}
913 
914 		timekeep = (struct timekeep *)va;
915 		timekeep->tk_version = TK_VERSION;
916 	}
917 
918 	pr->ps_timekeep = 0; /* no hint */
919 	uao_reference(timekeep_object);
920 	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz,
921 	    timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ,
922 	    MAP_INHERIT_COPY, MADV_RANDOM, 0))) {
923 		uao_detach(timekeep_object);
924 		return (ENOMEM);
925 	}
926 	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_timekeep,
927 	    pr->ps_timekeep + timekeep_sz, 1);
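	/*
	 * As with sigcode, the read-only timekeep mapping is made
	 * immutable so the process cannot replace or reprotect it.
	 */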
928 
929 	return (0);
930 }
931