1 /*	$OpenBSD: kern_exec.c,v 1.182 2016/06/11 21:00:11 kettenis Exp $	*/
2 /*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/
3 
4 /*-
5  * Copyright (C) 1993, 1994 Christopher G. Demetriou
6  * Copyright (C) 1992 Wolfgang Solfrank.
7  * Copyright (C) 1992 TooLs GmbH.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by TooLs GmbH.
21  * 4. The name of TooLs GmbH may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
25  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
29  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
30  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/filedesc.h>
39 #include <sys/kernel.h>
40 #include <sys/proc.h>
41 #include <sys/mount.h>
42 #include <sys/malloc.h>
43 #include <sys/pool.h>
44 #include <sys/namei.h>
45 #include <sys/vnode.h>
46 #include <sys/file.h>
47 #include <sys/acct.h>
48 #include <sys/exec.h>
49 #include <sys/ktrace.h>
50 #include <sys/resourcevar.h>
51 #include <sys/wait.h>
52 #include <sys/mman.h>
53 #include <sys/signalvar.h>
54 #include <sys/stat.h>
55 #include <sys/conf.h>
56 #include <sys/pledge.h>
57 #ifdef SYSVSHM
58 #include <sys/shm.h>
59 #endif
60 
61 #include <sys/syscallargs.h>
62 
63 #include <uvm/uvm_extern.h>
64 
65 #ifdef __HAVE_MD_TCB
66 # include <machine/tcb.h>
67 #endif
68 
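/*
 * Allocation constraints for the exec argument buffer: pageable memory
 * taken from exec_map, with the caller willing to wait.  Used below with
 * km_alloc()/km_free() for the NCARGS-sized argument/environment buffer.
 */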
69 const struct kmem_va_mode kv_exec = {
70 	.kv_wait = 1,
71 	.kv_map = &exec_map
72 };
73 
74 /*
75  * Map the shared signal code.
76  */
77 int exec_sigcode_map(struct process *, struct emul *);
78 
79 /*
80  * If non-zero, stackgap_random specifies the upper limit of the random gap
81  * size added to the fixed stack position.  It must be a power of two.
82  */
83 int stackgap_random = STACKGAP_RANDOM;
84 
85 /*
86  * check exec:
87  * given an "executable" described in the exec package's namei info,
88  * see what we can do with it.
89  *
90  * ON ENTRY:
91  *	exec package with appropriate namei info
92  *	proc pointer of exec'ing proc
93  *	NO SELF-LOCKED VNODES
94  *
95  * ON EXIT:
96  *	error:	nothing held, etc.  exec header still allocated.
97  *	ok:	filled exec package, one locked vnode.
98  *
99  * EXEC SWITCH ENTRY:
100  * 	Locked vnode to check, exec package, proc.
101  *
102  * EXEC SWITCH EXIT:
103  *	ok:	return 0, filled exec package, one locked vnode.
104  *	error:	destructive:
105  *			everything deallocated except exec header.
106  *		non-destructive:
107  *			error code, locked vnode, exec header unmodified
108  */
109 int
110 check_exec(struct proc *p, struct exec_package *epp)
111 {
112 	int error, i;
113 	struct vnode *vp;
114 	struct nameidata *ndp;
115 	size_t resid;
116 
117 	ndp = epp->ep_ndp;
118 	ndp->ni_cnd.cn_nameiop = LOOKUP;
119 	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
120 	/* first get the vnode */
121 	if ((error = namei(ndp)) != 0)
122 		return (error);
123 	epp->ep_vp = vp = ndp->ni_vp;
124 
125 	/* check for regular file */
126 	if (vp->v_type == VDIR) {
127 		error = EISDIR;
128 		goto bad1;
129 	}
130 	if (vp->v_type != VREG) {
131 		error = EACCES;
132 		goto bad1;
133 	}
134 
135 	/* get attributes */
136 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
137 		goto bad1;
138 
139 	/* Check mount point */
140 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
141 		error = EACCES;
142 		goto bad1;
143 	}
144 
145 	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
146 		epp->ep_vap->va_mode &= ~(VSUID | VSGID);
147 
148 	/* check access; for root we have to see if any exec bit is set */
149 	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
150 		goto bad1;
151 	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
152 		error = EACCES;
153 		goto bad1;
154 	}
155 
156 	/* try to open it */
157 	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
158 		goto bad1;
159 
160 	/* unlock vp, we need it unlocked from here */
161 	VOP_UNLOCK(vp, p);
162 
163 	/* now we have the file, get the exec header */
164 	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
165 	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
166 	if (error)
167 		goto bad2;
168 	epp->ep_hdrvalid = epp->ep_hdrlen - resid;
169 
170 	/*
171 	 * set up the vmcmds for creation of the process
172 	 * address space
173 	 */
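	/*
	 * Try each configured exec format checker in turn.  Keep the first
	 * "interesting" error (anything other than ENOEXEC); a destructive
	 * failure (EXEC_DESTR) means everything but the exec header has
	 * already been torn down, so bail out immediately.
	 */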
174 	error = ENOEXEC;
175 	for (i = 0; i < nexecs && error != 0; i++) {
176 		int newerror;
177 
178 		if (execsw[i].es_check == NULL)
179 			continue;
180 		newerror = (*execsw[i].es_check)(p, epp);
181 		if (!newerror && !(epp->ep_emul->e_flags & EMUL_ENABLED))
182 			newerror = EPERM;
183 		/* make sure the first "interesting" error code is saved. */
184 		if (!newerror || error == ENOEXEC)
185 			error = newerror;
186 		if (epp->ep_flags & EXEC_DESTR && error != 0)
187 			return (error);
188 	}
189 	if (!error) {
190 		/* check that entry point is sane */
191 		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
192 			error = ENOEXEC;
193 		}
194 
195 		/* check limits */
196 		if ((epp->ep_tsize > MAXTSIZ) ||
197 		    (epp->ep_dsize > p->p_rlimit[RLIMIT_DATA].rlim_cur))
198 			error = ENOMEM;
199 
200 		if (!error)
201 			return (0);
202 	}
203 
204 	/*
205 	 * free any vmspace-creation commands,
206 	 * and release their references
207 	 */
208 	kill_vmcmds(&epp->ep_vmcmds);
209 
210 bad2:
211 	/*
212 	 * close the vnode, free the pathname buf, and punt.
213 	 */
214 	vn_close(vp, FREAD, p->p_ucred, p);
215 	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
216 	return (error);
217 
218 bad1:
219 	/*
220 	 * free the namei pathname buffer, and put the vnode
221 	 * (which we don't yet have open).
222 	 */
223 	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
224 	vput(vp);
225 	return (error);
226 }
227 
228 /*
229  * exec system call
230  */
231 int
232 sys_execve(struct proc *p, void *v, register_t *retval)
233 {
234 	struct sys_execve_args /* {
235 		syscallarg(const char *) path;
236 		syscallarg(char *const *) argp;
237 		syscallarg(char *const *) envp;
238 	} */ *uap = v;
239 	int error;
240 	struct exec_package pack;
241 	struct nameidata nid;
242 	struct vattr attr;
243 	struct ucred *cred = p->p_ucred;
244 	char *argp;
245 	char * const *cpp, *dp, *sp;
246 #ifdef KTRACE
247 	char *env_start;
248 #endif
249 	struct process *pr = p->p_p;
250 	long argc, envc;
251 	size_t len, sgap;
252 #ifdef MACHINE_STACK_GROWS_UP
253 	size_t slen;
254 #endif
255 	char *stack;
256 	struct ps_strings arginfo;
257 	struct vmspace *vm = pr->ps_vmspace;
258 	char **tmpfap;
259 	extern struct emul emul_native;
260 	struct vnode *otvp;
261 
262 	/* get other threads to stop */
263 	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
264 		return (error);
265 
266 	/*
267 	 * Cheap solution to complicated problems.
268 	 * Mark this process as "leave me alone, I'm execing".
269 	 */
270 	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);
271 
272 	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
273 	nid.ni_pledge = PLEDGE_EXEC;
274 
275 	/*
276 	 * initialize the fields of the exec package.
277 	 */
278 	pack.ep_name = (char *)SCARG(uap, path);
279 	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
280 	pack.ep_hdrlen = exec_maxhdrsz;
281 	pack.ep_hdrvalid = 0;
282 	pack.ep_ndp = &nid;
283 	pack.ep_interp = NULL;
284 	pack.ep_emul_arg = NULL;
285 	VMCMDSET_INIT(&pack.ep_vmcmds);
286 	pack.ep_vap = &attr;
287 	pack.ep_emul = &emul_native;
288 	pack.ep_flags = 0;
289 
290 	/* see if we can run it. */
291 	if ((error = check_exec(p, &pack)) != 0) {
292 		goto freehdr;
293 	}
294 
295 	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */
296 
297 	/* allocate an argument buffer */
298 	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
299 #ifdef DIAGNOSTIC
300 	if (argp == NULL)
301 		panic("execve: argp == NULL");
302 #endif
303 	dp = argp;
304 	argc = 0;
305 
306 	/* copy the fake args list, if there's one, freeing it as we go */
307 	if (pack.ep_flags & EXEC_HASARGL) {
308 		tmpfap = pack.ep_fa;
309 		while (*tmpfap != NULL) {
310 			char *cp;
311 
312 			cp = *tmpfap;
313 			while (*cp)
314 				*dp++ = *cp++;
315 			*dp++ = '\0';
316 
317 			free(*tmpfap, M_EXEC, 0);
318 			tmpfap++; argc++;
319 		}
320 		free(pack.ep_fa, M_EXEC, 0);
321 		pack.ep_flags &= ~EXEC_HASARGL;
322 	}
323 
324 	/* Now get argv & environment */
325 	if (!(cpp = SCARG(uap, argp))) {
326 		error = EFAULT;
327 		goto bad;
328 	}
329 
330 	if (pack.ep_flags & EXEC_SKIPARG)
331 		cpp++;
332 
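	/*
	 * Copy in the argv pointers one at a time, then copy each string
	 * into the remaining space of the argument buffer; running past
	 * ARG_MAX turns ENAMETOOLONG into E2BIG below.
	 */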
333 	while (1) {
334 		len = argp + ARG_MAX - dp;
335 		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
336 			goto bad;
337 		if (!sp)
338 			break;
339 		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
340 			if (error == ENAMETOOLONG)
341 				error = E2BIG;
342 			goto bad;
343 		}
344 		dp += len;
345 		cpp++;
346 		argc++;
347 	}
348 
349 	/* must have at least one argument */
350 	if (argc == 0) {
351 		error = EINVAL;
352 		goto bad;
353 	}
354 
355 #ifdef KTRACE
356 	if (KTRPOINT(p, KTR_EXECARGS))
357 		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
358 #endif
359 
360 	envc = 0;
361 	/* environment does not need to be there */
362 	if ((cpp = SCARG(uap, envp)) != NULL) {
363 #ifdef KTRACE
364 		env_start = dp;
365 #endif
366 		while (1) {
367 			len = argp + ARG_MAX - dp;
368 			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
369 				goto bad;
370 			if (!sp)
371 				break;
372 			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
373 				if (error == ENAMETOOLONG)
374 					error = E2BIG;
375 				goto bad;
376 			}
377 			dp += len;
378 			cpp++;
379 			envc++;
380 		}
381 
382 #ifdef KTRACE
383 		if (KTRPOINT(p, KTR_EXECENV))
384 			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
385 #endif
386 	}
387 
388 	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);
389 
390 	sgap = STACKGAPLEN;
391 
392 	/*
393 	 * If random stackgap is enabled, the stack itself has already been
394 	 * placed at a random, page-aligned location.  Provide the lower,
395 	 * sub-page bits of random placement now.
396 	 */
397 	if (stackgap_random != 0) {
398 		sgap += arc4random() & PAGE_MASK;
399 		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
400 	}
401 
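	/*
	 * The new stack must have room for the argc word, the argv/envp
	 * pointer vectors with their terminating NULLs plus any
	 * emulation-specific words (e_arglen), the argument and environment
	 * strings accumulated in dp, the (possibly randomized) stack gap,
	 * and the ps_strings structure.
	 */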
402 	/* Now check if args & environ fit into new stack */
403 	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
404 	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;
405 
406 	len = (len + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
407 
408 	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
409 		error = ENOMEM;
410 		goto bad;
411 	}
412 
413 	/* adjust "active stack depth" for process VSZ */
414 	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */
415 
416 	/*
417 	 * we're committed: any further errors will kill the process, so
418 	 * kill the other threads now.
419 	 */
420 	single_thread_set(p, SINGLE_EXIT, 0);
421 
422 	/*
423 	 * Prepare vmspace for remapping.  Note that uvmspace_exec() can
424 	 * replace pr->ps_vmspace!
425 	 */
426 	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
427 
428 	vm = pr->ps_vmspace;
429 	/* Now map address space */
430 	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
431 	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
432 	    trunc_page(pack.ep_taddr));
433 	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
434 	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
435 	    trunc_page(pack.ep_daddr));
436 	vm->vm_dused = 0;
437 	vm->vm_ssize = atop(round_page(pack.ep_ssize));
438 	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
439 	vm->vm_minsaddr = (char *)pack.ep_minsaddr;
440 
441 	/* create the new process's VM space by running the vmcmds */
442 #ifdef DIAGNOSTIC
443 	if (pack.ep_vmcmds.evs_used == 0)
444 		panic("execve: no vmcmds");
445 #endif
446 	error = exec_process_vmcmds(p, &pack);
447 
448 	/* if an error happened, deallocate and punt */
449 	if (error)
450 		goto exec_abort;
451 
452 	/* old "stackgap" is gone now */
453 	pr->ps_stackgap = 0;
454 
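	/*
	 * Park ps_strings just past the (possibly randomized) stack gap and
	 * map the gap itself PROT_NONE so it can never be read or written.
	 */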
455 #ifdef MACHINE_STACK_GROWS_UP
456 	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
457 	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
458 	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
459 		goto exec_abort;
460 #else
461 	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
462 	if (uvm_map_protect(&vm->vm_map,
463 	    round_page(pr->ps_strings + sizeof(arginfo)),
464 	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
465 		goto exec_abort;
466 #endif
467 
468 	/* remember information about the process */
469 	arginfo.ps_nargvstr = argc;
470 	arginfo.ps_nenvstr = envc;
471 
472 #ifdef MACHINE_STACK_GROWS_UP
473 	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
474 	slen = len - sizeof(arginfo) - sgap;
475 #else
476 	stack = (char *)(vm->vm_minsaddr - len);
477 #endif
478 	/* Now copy argc, args & environ to new stack */
479 	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
480 		goto exec_abort;
481 
482 	/* copy out the process's ps_strings structure */
483 	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
484 		goto exec_abort;
485 
486 	stopprofclock(pr);	/* stop profiling */
487 	fdcloseexec(p);		/* handle close on exec */
488 	execsigs(p);		/* reset caught signals */
489 	TCB_SET(p, NULL);	/* reset the TCB address */
490 	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
491 	pr->ps_kbind_cookie = 0;
492 	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);
493 
494 	/* set command name & other accounting info */
495 	memset(p->p_comm, 0, sizeof(p->p_comm));
496 	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
497 	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
498 	pr->ps_acflag &= ~AFORK;
499 
500 	/* record proc's vnode, for use by sysctl */
501 	otvp = pr->ps_textvp;
502 	vref(pack.ep_vp);
503 	pr->ps_textvp = pack.ep_vp;
504 	if (otvp)
505 		vrele(otvp);
506 
507 	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
508 	if (pr->ps_flags & PS_PPWAIT) {
509 		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
510 		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
511 		wakeup(pr->ps_pptr);
512 	}
513 
514 	/*
515 	 * If process does execve() while it has a mismatched real,
516 	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
517 	 */
518 	if (cred->cr_uid != cred->cr_ruid ||
519 	    cred->cr_uid != cred->cr_svuid ||
520 	    cred->cr_gid != cred->cr_rgid ||
521 	    cred->cr_gid != cred->cr_svgid)
522 		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
523 	else
524 		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);
525 
526 	atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
527 	pledge_dropwpaths(pr);
528 
529 	/*
530 	 * deal with set[ug]id.
531 	 * MNT_NOSUID has already been used to disable s[ug]id.
532 	 */
533 	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
534 		int i;
535 
536 		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);
537 
538 #ifdef KTRACE
539 		/*
540 		 * If the process is being ktraced, turn tracing off,
541 		 * unless root set it.
542 		 */
543 		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
544 			ktrcleartrace(pr);
545 #endif
546 		p->p_ucred = cred = crcopy(cred);
547 		if (attr.va_mode & VSUID)
548 			cred->cr_uid = attr.va_uid;
549 		if (attr.va_mode & VSGID)
550 			cred->cr_gid = attr.va_gid;
551 
552 		/*
553 		 * For set[ug]id processes, a few caveats apply to
554 		 * stdin, stdout, and stderr.
555 		 */
556 		error = 0;
557 		fdplock(p->p_fd);
558 		for (i = 0; i < 3; i++) {
559 			struct file *fp = NULL;
560 
561 			/*
562 			 * NOTE - This cannot return NULL because of an
563 			 * immature fd (the fd table is not shared, since
564 			 * we're suid); it is NULL only if the fd is not open.
565 			 */
566 			fp = fd_getfile(p->p_fd, i);
567 
568 			/*
569 			 * Ensure that stdin, stdout, and stderr are already
570 			 * allocated.  We do not want userland to accidentally
571 			 * allocate descriptors in this range which has implied
572 			 * meaning to libc.
573 			 */
574 			if (fp == NULL) {
575 				short flags = FREAD | (i == 0 ? 0 : FWRITE);
576 				struct vnode *vp;
577 				int indx;
578 
579 				if ((error = falloc(p, &fp, &indx)) != 0)
580 					break;
581 #ifdef DIAGNOSTIC
582 				if (indx != i)
583 					panic("sys_execve: falloc indx != i");
584 #endif
585 				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
586 					fdremove(p->p_fd, indx);
587 					closef(fp, p);
588 					break;
589 				}
590 				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
591 					fdremove(p->p_fd, indx);
592 					closef(fp, p);
593 					vrele(vp);
594 					break;
595 				}
596 				if (flags & FWRITE)
597 					vp->v_writecount++;
598 				fp->f_flag = flags;
599 				fp->f_type = DTYPE_VNODE;
600 				fp->f_ops = &vnops;
601 				fp->f_data = (caddr_t)vp;
602 				FILE_SET_MATURE(fp, p);
603 			}
604 		}
605 		fdpunlock(p->p_fd);
606 		if (error)
607 			goto exec_abort;
608 	} else
609 		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);
610 
611 	/*
612 	 * Reset the saved ugids and update the process's copy of the
613 	 * creds if the creds have been changed
614 	 */
615 	if (cred->cr_uid != cred->cr_svuid ||
616 	    cred->cr_gid != cred->cr_svgid) {
617 		/* make sure we have unshared ucreds */
618 		p->p_ucred = cred = crcopy(cred);
619 		cred->cr_svuid = cred->cr_uid;
620 		cred->cr_svgid = cred->cr_gid;
621 	}
622 
623 	if (pr->ps_ucred != cred) {
624 		struct ucred *ocred;
625 
626 		ocred = pr->ps_ucred;
627 		crhold(cred);
628 		pr->ps_ucred = cred;
629 		crfree(ocred);
630 	}
631 
632 	if (pr->ps_flags & PS_SUGIDEXEC) {
633 		int i, s = splclock();
634 
635 		timeout_del(&pr->ps_realit_to);
636 		for (i = 0; i < nitems(pr->ps_timer); i++) {
637 			timerclear(&pr->ps_timer[i].it_interval);
638 			timerclear(&pr->ps_timer[i].it_value);
639 		}
640 		splx(s);
641 	}
642 
643 	/* reset CPU time usage for the thread, but not the process */
644 	timespecclear(&p->p_tu.tu_runtime);
645 	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
646 
647 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
648 
649 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
650 	vn_close(pack.ep_vp, FREAD, cred, p);
651 
652 	/*
653 	 * notify others that we exec'd
654 	 */
655 	KNOTE(&pr->ps_klist, NOTE_EXEC);
656 
657 	/* setup new registers and do misc. setup. */
658 	if (pack.ep_emul->e_fixup != NULL) {
659 		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
660 			goto free_pack_abort;
661 	}
662 #ifdef MACHINE_STACK_GROWS_UP
663 	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
664 #else
665 	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
666 #endif
667 
668 	/* map the process's signal trampoline code */
669 	if (exec_sigcode_map(pr, pack.ep_emul))
670 		goto free_pack_abort;
671 
672 #ifdef __HAVE_EXEC_MD_MAP
673 	/* perform md specific mappings that process might need */
674 	if (exec_md_map(p, &pack))
675 		goto free_pack_abort;
676 #endif
677 
678 	if (pr->ps_flags & PS_TRACED)
679 		psignal(p, SIGTRAP);
680 
681 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
682 
683 	/*
684 	 * Call emulation specific exec hook. This can setup per-process
685 	 * p->p_emuldata or do any other per-process stuff an emulation needs.
686 	 *
687 	 * If we are executing a process of a different emulation than the
688 	 * original forked process, call e_proc_exit() of the old emulation
689 	 * first, then e_proc_exec() of the new emulation.  If the emulation
690 	 * is the same, the exec hook code should deallocate any old
691 	 * emulation resources held previously by this process.
692 	 */
693 	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
694 	    pr->ps_emul != pack.ep_emul)
695 		(*pr->ps_emul->e_proc_exit)(p);
696 
697 	p->p_descfd = 255;
698 	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
699 		p->p_descfd = pack.ep_fd;
700 
701 	if (pack.ep_flags & EXEC_WXNEEDED)
702 		p->p_p->ps_flags |= PS_WXNEEDED;
703 
704 	/*
705 	 * Call the exec hook.  Emulation code may NOT store a reference to
706 	 * anything from &pack.
707 	 */
708 	if (pack.ep_emul->e_proc_exec)
709 		(*pack.ep_emul->e_proc_exec)(p, &pack);
710 
711 	/* update ps_emul, the old value is no longer needed */
712 	pr->ps_emul = pack.ep_emul;
713 
714 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
715 	single_thread_clear(p, P_SUSPSIG);
716 
717 	return (0);
718 
719 bad:
720 	/* free the vmspace-creation commands, and release their references */
721 	kill_vmcmds(&pack.ep_vmcmds);
722 	/* kill any opened file descriptor, if necessary */
723 	if (pack.ep_flags & EXEC_HASFD) {
724 		pack.ep_flags &= ~EXEC_HASFD;
725 		fdplock(p->p_fd);
726 		(void) fdrelease(p, pack.ep_fd);
727 		fdpunlock(p->p_fd);
728 	}
729 	if (pack.ep_interp != NULL)
730 		pool_put(&namei_pool, pack.ep_interp);
731 	if (pack.ep_emul_arg != NULL)
732 		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
733 	/* close and put the exec'd file */
734 	vn_close(pack.ep_vp, FREAD, cred, p);
735 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
736 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
737 
738 freehdr:
739 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
740 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
741 	single_thread_clear(p, P_SUSPSIG);
742 
743 	return (error);
744 
745 exec_abort:
746 	/*
747 	 * the old process doesn't exist anymore.  exit gracefully.
748 	 * get rid of the (new) address space we have created, if any, get rid
749 	 * of our namei data and vnode, and exit noting failure
750 	 */
751 	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
752 		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
753 	if (pack.ep_interp != NULL)
754 		pool_put(&namei_pool, pack.ep_interp);
755 	if (pack.ep_emul_arg != NULL)
756 		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
757 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
758 	vn_close(pack.ep_vp, FREAD, cred, p);
759 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
760 
761 free_pack_abort:
762 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
763 	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);
764 
765 	/* NOTREACHED */
766 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
767 
768 	return (0);
769 }
770 
771 
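/*
 * Copy out the exec arguments to the new stack.  Starting at "stack" the
 * layout is: argc, the argv[] pointer vector, a NULL, the envp[] pointer
 * vector and another NULL; the strings themselves are copied from the
 * kernel buffer argp to the area just past the pointers (which also
 * reserves e_arglen words for emulation-specific use).  Returns the
 * address just past the envp NULL on success, or NULL if a copyout failed.
 */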
772 void *
773 copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
774     void *argp)
775 {
776 	char **cpp = stack;
777 	char *dp, *sp;
778 	size_t len;
779 	void *nullp = NULL;
780 	long argc = arginfo->ps_nargvstr;
781 	int envc = arginfo->ps_nenvstr;
782 
783 	if (copyout(&argc, cpp++, sizeof(argc)))
784 		return (NULL);
785 
786 	dp = (char *) (cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
787 	sp = argp;
788 
789 	/* XXX don't copy them out, remap them! */
790 	arginfo->ps_argvstr = cpp; /* remember location of argv for later */
791 
792 	for (; --argc >= 0; sp += len, dp += len)
793 		if (copyout(&dp, cpp++, sizeof(dp)) ||
794 		    copyoutstr(sp, dp, ARG_MAX, &len))
795 			return (NULL);
796 
797 	if (copyout(&nullp, cpp++, sizeof(nullp)))
798 		return (NULL);
799 
800 	arginfo->ps_envstr = cpp; /* remember location of envp for later */
801 
802 	for (; --envc >= 0; sp += len, dp += len)
803 		if (copyout(&dp, cpp++, sizeof(dp)) ||
804 		    copyoutstr(sp, dp, ARG_MAX, &len))
805 			return (NULL);
806 
807 	if (copyout(&nullp, cpp++, sizeof(nullp)))
808 		return (NULL);
809 
810 	return (cpp);
811 }
812 
813 int
814 exec_sigcode_map(struct process *pr, struct emul *e)
815 {
816 	vsize_t sz;
817 
818 	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;
819 
820 	/*
821 	 * If we don't have a sigobject for this emulation, create one.
822 	 *
823 	 * sigobject is an anonymous memory object (just like SYSV shared
824 	 * memory) that we keep a permanent reference to and that we map
825 	 * in all processes that need this sigcode. The creation is simple,
826 	 * we create an object, add a permanent reference to it, map it in
827 	 * kernel space, copy out the sigcode to it and unmap it.
828 	 * Then we map it with PROT_READ|PROT_EXEC into the process just
829 	 * the way sys_mmap would map it.
830 	 */
831 	if (e->e_sigobject == NULL) {
832 		extern int sigfillsiz;
833 		extern u_char sigfill[];
834 		size_t off;
835 		vaddr_t va;
836 		int r;
837 
838 		e->e_sigobject = uao_create(sz, 0);
839 		uao_reference(e->e_sigobject);	/* permanent reference */
840 
841 		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
842 		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
843 		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
844 			uao_detach(e->e_sigobject);
845 			return (ENOMEM);
846 		}
847 
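		/*
		 * Fill the entire object with the architecture's sigfill
		 * pattern (presumably a trapping instruction sequence)
		 * before copying the real sigcode over the beginning, so
		 * the slack space after the sigcode is not left zeroed.
		 */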
848 		for (off = 0; off < round_page(sz); off += sigfillsiz)
849 			memcpy((caddr_t)va + off, sigfill, sigfillsiz);
850 		memcpy((caddr_t)va, e->e_sigcode, sz);
851 		uvm_unmap(kernel_map, va, va + round_page(sz));
852 	}
853 
854 	pr->ps_sigcode = 0; /* no hint */
855 	uao_reference(e->e_sigobject);
856 	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
857 	    e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
858 	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
859 	    MADV_RANDOM, UVM_FLAG_COPYONW))) {
860 		uao_detach(e->e_sigobject);
861 		return (ENOMEM);
862 	}
863 
864 	/* Calculate PC at point of sigreturn entry */
865 	pr->ps_sigcoderet = pr->ps_sigcode +
866 	    (pr->ps_emul->e_esigret - pr->ps_emul->e_sigcode);
867 
868 	return (0);
869 }
870