/*	$OpenBSD: kern_exec.c,v 1.193 2018/01/02 06:38:45 guenther Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

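/*
 * Allocation mode for the pageable argument/environment staging buffer:
 * allocations may sleep and their VA comes from exec_map (used by the
 * km_alloc()/km_free() calls in sys_execve() below).
 */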
const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *, struct emul *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of 2.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 * 	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto bad1;
	}
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access; for root we must also verify that some exec bit is set */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp, p);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
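	/*
	 * Each execsw[] entry's es_check handler recognizes one executable
	 * format (e.g. native ELF, #! interpreter scripts); on success it
	 * fills in the package and queues the vmcmds that are run later by
	 * exec_process_vmcmds().
	 */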
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		if (!newerror && !(epp->ep_emul->e_flags & EMUL_ENABLED))
			newerror = EPERM;
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > p->p_rlimit[RLIMIT_DATA].rlim_cur))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm;
	extern struct emul emul_native;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for (; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

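	/*
	 * Space needed on the new stack: the argv[] and envp[] pointer
	 * arrays plus their two NULL terminators and any emulation-specific
	 * words, the argc word, the copied argument/environment strings
	 * (dp - argp bytes), the stack gap, and the ps_strings structure.
	 */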
	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

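	/*
	 * Place ps_strings just beyond the random stack gap at the far end
	 * of the stack, and turn the gap itself into an inaccessible
	 * (PROT_NONE) guard region.
	 */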
#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, 0, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

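	/*
	 * For set-id execs, cancel the real-time interval timer and clear
	 * all interval timers so a timer armed by the old image cannot
	 * deliver a signal into the new, more privileged one.
	 */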
	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		p->p_p->ps_flags |= PS_WXNEEDED;
	else
		p->p_p->ps_flags &= ~PS_WXNEEDED;

	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}


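/*
 * Copy argc, the argv[] and envp[] pointer arrays, and the argument and
 * environment strings from the kernel staging buffer (argp) onto the new
 * user stack.  From the stack pointer up, the resulting layout is:
 *
 *	argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL |
 *	e_arglen words (e.g. ELF auxiliary vector) | strings
 *
 * Returns a pointer just past envp's NULL terminator on success, or NULL
 * if a copyout fails.
 */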
void *
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (NULL);

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (NULL);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (NULL);

	return (cpp);
}

int
exec_sigcode_map(struct process *pr, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (e->e_sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off;
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}

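		/*
		 * Fill the whole object with the MD sigfill[] pattern before
		 * copying the real sigcode over the start, so the padding
		 * beyond the sigcode holds a deliberate filler (on most
		 * archs a trapping instruction) rather than zeroes.
		 */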
		for (off = 0; off < round_page(sz); off += sigfillsiz)
			memcpy((caddr_t)va + off, sigfill, sigfillsiz);
		memcpy((caddr_t)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

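	/*
	 * Map the shared sigcode object into the new process read/exec and
	 * copy-on-write; ps_sigcode is passed in as 0, so uvm_map gets no
	 * address hint and picks the (randomized) location itself.
	 */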
	pr->ps_sigcode = 0; /* no hint */
	uao_reference(e->e_sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode +
	    (pr->ps_emul->e_esigret - pr->ps_emul->e_sigcode);

	return (0);
}
859