/*	$OpenBSD: kern_exec.c,v 1.231 2022/08/14 01:58:27 jsg Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

#include <sys/timetc.h>

struct uvm_object *sigobject;		/* shared sigcode object */
struct uvm_object *timekeep_object;
struct timekeep *timekeep;

void	unveil_destroy(struct process *ps);

const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *);

/*
 * Map the shared timekeep page.
 */
int exec_timekeep_map(struct process *);
/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of 2.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 * 	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit is on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
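	/* resid is what vn_rdwr() could not read; the rest is valid header */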
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_args = NULL;
	pack.ep_auxinfo = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for (; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

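	/*
	 * Copy in each argv pointer and then its string, stopping at the
	 * terminating NULL pointer; len tracks the space left in the buffer.
	 */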
	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

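	/* round the end of the copied strings up to the stack alignment */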
	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved to a random, page-aligned location.  Provide the lower
	 * bits of random placement now.
	 */
	if (stackgap_random == 0) {
		sgap = 0;
	} else {
		sgap = arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + ELF_AUX_WORDS) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

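	/*
	 * len covers the pointer vectors (argv, envp, their NULL
	 * terminators and the ELF auxiliary vector), the argc word, the
	 * copied strings (dp - argp), the random gap and struct ps_strings.
	 */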
	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 1);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

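	/*
	 * Place ps_strings at the stack end, past the random gap; the gap
	 * itself is made inaccessible (PROT_NONE) so it cannot be used.
	 */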
#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	memset(&arginfo, 0, sizeof(arginfo));

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!copyargs(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	strlcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, sizeof(pr->ps_comm));
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If the process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/*
		 * Clear our unveil paths out so the new image
		 * starts afresh.
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOSUID has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		cancel_all_itimers();
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* map the process's timekeep page, needs to be before exec_elf_fixup */
	if (exec_timekeep_map(pr))
		goto free_pack_abort;

	/* setup new registers and do misc. setup. */
	if (exec_elf_fixup(p, &pack) != 0)
		goto free_pack_abort;
#ifdef MACHINE_STACK_GROWS_UP
	setregs(p, &pack, (u_long)stack + slen, retval);
#else
	setregs(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease unlocks p->p_fd. */
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, 0, SIGABRT, EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}


int
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (0);

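	/*
	 * Layout on the new stack, from low to high addresses: the argc
	 * word, the argv pointers, NULL, the envp pointers, NULL, space
	 * for the ELF auxiliary vector, then the argument and environment
	 * strings themselves starting at dp.
	 */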
	dp = (char *) (cpp + argc + envc + 2 + ELF_AUX_WORDS);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	/* if this process needs auxinfo, note where to place it */
	if (pack->ep_args != NULL)
		pack->ep_auxinfo = cpp;

	return (1);
}

int
exec_sigcode_map(struct process *pr)
{
	extern char sigcode[], esigcode[], sigcoderet[];
	vsize_t sz;

	sz = (vaddr_t)esigcode - (vaddr_t)sigcode;

	/*
	 * If we don't have a sigobject yet, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple:
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off, left;
		vaddr_t va;
		int r;

		sigobject = uao_create(sz, 0);
		uao_reference(sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(sigobject);
			return (ENOMEM);
		}

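		/*
		 * Pad the whole object with the sigfill pattern, then
		 * copy the real sigcode over the start of it.
		 */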
		for (off = 0, left = round_page(sz); left != 0;
		    off += sigfillsiz) {
			size_t chunk = ulmin(left, sigfillsiz);
			memcpy((caddr_t)va + off, sigfill, chunk);
			left -= chunk;
		}
		memcpy((caddr_t)va, sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	pr->ps_sigcode = 0; /* no hint */
	uao_reference(sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) {
		uao_detach(sigobject);
		return (ENOMEM);
	}

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode + (sigcoderet - sigcode);

	return (0);
}

int
exec_timekeep_map(struct process *pr)
{
	size_t timekeep_sz = round_page(sizeof(struct timekeep));

	/*
	 * Similar to the sigcode object: created and wired once, then
	 * mapped read-only into every process.
	 */
	if (timekeep_object == NULL) {
		vaddr_t va = 0;

		timekeep_object = uao_create(timekeep_sz, 0);
		uao_reference(timekeep_object);

		if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0))) {
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}
		if (uvm_fault_wire(kernel_map, va, va + timekeep_sz,
		    PROT_READ | PROT_WRITE)) {
			uvm_unmap(kernel_map, va, va + timekeep_sz);
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}

		timekeep = (struct timekeep *)va;
		timekeep->tk_version = TK_VERSION;
	}

	pr->ps_timekeep = 0; /* no hint */
	uao_reference(timekeep_object);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz,
	    timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ,
	    MAP_INHERIT_COPY, MADV_RANDOM, 0))) {
		uao_detach(timekeep_object);
		return (ENOMEM);
	}

	return (0);
}