xref: /openbsd-src/sys/kern/kern_exec.c (revision 3bef86f7bc2197c76d5fec5b22e91f84e96ed5e5)
1 /*	$OpenBSD: kern_exec.c,v 1.254 2024/01/17 18:56:13 deraadt Exp $	*/
2 /*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/
3 
4 /*-
5  * Copyright (C) 1993, 1994 Christopher G. Demetriou
6  * Copyright (C) 1992 Wolfgang Solfrank.
7  * Copyright (C) 1992 TooLs GmbH.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by TooLs GmbH.
21  * 4. The name of TooLs GmbH may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
25  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
29  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
30  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/filedesc.h>
39 #include <sys/proc.h>
40 #include <sys/user.h>
41 #include <sys/mount.h>
42 #include <sys/malloc.h>
43 #include <sys/pool.h>
44 #include <sys/namei.h>
45 #include <sys/vnode.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/acct.h>
49 #include <sys/exec.h>
50 #include <sys/exec_elf.h>
51 #include <sys/ktrace.h>
52 #include <sys/resourcevar.h>
53 #include <sys/mman.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/conf.h>
57 #include <sys/pledge.h>
58 #ifdef SYSVSHM
59 #include <sys/shm.h>
60 #endif
61 
62 #include <sys/syscallargs.h>
63 
64 #include <uvm/uvm_extern.h>
65 #include <machine/tcb.h>
66 
67 #include <sys/timetc.h>
68 
69 struct uvm_object *sigobject;		/* shared sigcode object */
70 vaddr_t sigcode_va;
71 vsize_t sigcode_sz;
72 struct uvm_object *timekeep_object;
73 struct timekeep *timekeep;
74 
75 void	unveil_destroy(struct process *ps);
76 
77 const struct kmem_va_mode kv_exec = {
78 	.kv_wait = 1,
79 	.kv_map = &exec_map
80 };
81 
82 /*
83  * Map the shared signal code.
84  */
85 int exec_sigcode_map(struct process *);
86 
87 /*
88  * Map the shared timekeep page.
89  */
90 int exec_timekeep_map(struct process *);
91 
92 /*
93  * If non-zero, stackgap_random specifies the upper limit of the random gap
94  * size added to the fixed stack position.  Must be a power of two.
95  */
96 int stackgap_random = STACKGAP_RANDOM;
97 
98 /*
99  * check exec:
100  * given an "executable" described in the exec package's namei info,
101  * see what we can do with it.
102  *
103  * ON ENTRY:
104  *	exec package with appropriate namei info
105  *	proc pointer of exec'ing proc
106  *	NO SELF-LOCKED VNODES
107  *
108  * ON EXIT:
109  *	error:	nothing held, etc.  exec header still allocated.
110  *	ok:	filled exec package, one locked vnode.
111  *
112  * EXEC SWITCH ENTRY:
113  * 	Locked vnode to check, exec package, proc.
114  *
115  * EXEC SWITCH EXIT:
116  *	ok:	return 0, filled exec package, one locked vnode.
117  *	error:	destructive:
118  *			everything deallocated except exec header.
119  *		non-destructive:
120  *			error code, locked vnode, exec header unmodified
121  */
122 int
123 check_exec(struct proc *p, struct exec_package *epp)
124 {
125 	int error, i;
126 	struct vnode *vp;
127 	struct nameidata *ndp;
128 	size_t resid;
129 
130 	ndp = epp->ep_ndp;
131 	ndp->ni_cnd.cn_nameiop = LOOKUP;
132 	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
133 	if (epp->ep_flags & EXEC_INDIR)
134 		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
135 	/* first get the vnode */
136 	if ((error = namei(ndp)) != 0)
137 		return (error);
138 	epp->ep_vp = vp = ndp->ni_vp;
139 
140 	/* check for regular file */
141 	if (vp->v_type != VREG) {
142 		error = EACCES;
143 		goto bad1;
144 	}
145 
146 	/* get attributes */
147 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
148 		goto bad1;
149 
150 	/* Check mount point */
151 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
152 		error = EACCES;
153 		goto bad1;
154 	}
155 
156 	/* SUID programs may not be started with execpromises */
157 	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
158 	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
159 		error = EACCES;
160 		goto bad1;
161 	}
162 
163 	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
164 		epp->ep_vap->va_mode &= ~(VSUID | VSGID);
165 
166 	/* check access.  for root we have to see if any exec bit is on */
167 	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
168 		goto bad1;
169 	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
170 		error = EACCES;
171 		goto bad1;
172 	}
173 
174 	/* try to open it */
175 	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
176 		goto bad1;
177 
178 	/* unlock vp, we need it unlocked from here */
179 	VOP_UNLOCK(vp);
180 
181 	/* now we have the file, get the exec header */
182 	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
183 	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
184 	if (error)
185 		goto bad2;
186 	epp->ep_hdrvalid = epp->ep_hdrlen - resid;
187 
188 	/*
189 	 * set up the vmcmds for creation of the process
190 	 * address space
191 	 */
192 	error = ENOEXEC;
193 	for (i = 0; i < nexecs && error != 0; i++) {
194 		int newerror;
195 
196 		if (execsw[i].es_check == NULL)
197 			continue;
198 		newerror = (*execsw[i].es_check)(p, epp);
199 		/* make sure the first "interesting" error code is saved. */
200 		if (!newerror || error == ENOEXEC)
201 			error = newerror;
202 		if (epp->ep_flags & EXEC_DESTR && error != 0)
203 			return (error);
204 	}
205 	if (!error) {
206 		/* check that entry point is sane */
207 		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
208 			error = ENOEXEC;
209 		}
210 
211 		/* check limits */
212 		if ((epp->ep_tsize > MAXTSIZ) ||
213 		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
214 			error = ENOMEM;
215 
216 		if (!error)
217 			return (0);
218 	}
219 
220 	/*
221 	 * free any vmspace-creation commands,
222 	 * and release their references
223 	 */
224 	kill_vmcmds(&epp->ep_vmcmds);
225 
226 bad2:
227 	/*
228 	 * close the vnode, free the pathname buf, and punt.
229 	 */
230 	vn_close(vp, FREAD, p->p_ucred, p);
231 	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
232 	return (error);
233 
234 bad1:
235 	/*
236 	 * free the namei pathname buffer, and put the vnode
237 	 * (which we don't yet have open).
238 	 */
239 	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
240 	vput(vp);
241 	return (error);
242 }
243 
244 /*
245  * exec system call
246  */
247 int
248 sys_execve(struct proc *p, void *v, register_t *retval)
249 {
250 	struct sys_execve_args /* {
251 		syscallarg(const char *) path;
252 		syscallarg(char *const *) argp;
253 		syscallarg(char *const *) envp;
254 	} */ *uap = v;
255 	int error;
256 	struct exec_package pack;
257 	struct nameidata nid;
258 	struct vattr attr;
259 	struct ucred *cred = p->p_ucred;
260 	char *argp;
261 	char * const *cpp, *dp, *sp;
262 #ifdef KTRACE
263 	char *env_start;
264 #endif
265 	struct process *pr = p->p_p;
266 	long argc, envc;
267 	size_t len, sgap, dstsize;
268 #ifdef MACHINE_STACK_GROWS_UP
269 	size_t slen;
270 #endif
271 	char *stack;
272 	struct ps_strings arginfo;
273 	struct vmspace *vm = p->p_vmspace;
274 	struct vnode *otvp;
275 
276 	/*
277 	 * Get other threads to stop; if contested, return ERESTART
278 	 * so the syscall is restarted after halting in userret.
279 	 */
280 	if (single_thread_set(p, SINGLE_UNWIND | SINGLE_DEEP))
281 		return (ERESTART);
282 
283 	/*
284 	 * Cheap solution to complicated problems.
285 	 * Mark this process as "leave me alone, I'm execing".
286 	 */
287 	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);
288 
289 	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
290 	nid.ni_pledge = PLEDGE_EXEC;
291 	nid.ni_unveil = UNVEIL_EXEC;
292 
293 	/*
294 	 * initialize the fields of the exec package.
295 	 */
296 	pack.ep_name = (char *)SCARG(uap, path);
297 	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
298 	pack.ep_hdrlen = exec_maxhdrsz;
299 	pack.ep_hdrvalid = 0;
300 	pack.ep_ndp = &nid;
301 	pack.ep_interp = NULL;
302 	pack.ep_args = NULL;
303 	pack.ep_auxinfo = NULL;
304 	VMCMDSET_INIT(&pack.ep_vmcmds);
305 	pack.ep_vap = &attr;
306 	pack.ep_flags = 0;
307 	pack.ep_pins = NULL;
308 	pack.ep_npins = 0;
309 
310 	/* see if we can run it. */
311 	if ((error = check_exec(p, &pack)) != 0) {
312 		goto freehdr;
313 	}
314 
315 	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */
316 
317 	/* allocate an argument buffer */
318 	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
319 #ifdef DIAGNOSTIC
320 	if (argp == NULL)
321 		panic("execve: argp == NULL");
322 #endif
323 	dp = argp;
324 	argc = 0;
325 
326 	/*
327 	 * Copy the fake args list, if there's one, freeing it as we go.
328 	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
329 	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
330 	 */
331 	if (pack.ep_flags & EXEC_HASARGL) {
332 		dstsize = NCARGS;
333 		for (; pack.ep_fa[argc] != NULL; argc++) {
334 			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
335 			len++;
336 			dp += len; dstsize -= len;
337 			if (pack.ep_fa[argc+1] != NULL)
338 				free(pack.ep_fa[argc], M_EXEC, len);
339 			else
340 				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
341 		}
342 		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
343 		pack.ep_flags &= ~EXEC_HASARGL;
344 	}
345 
346 	/* Now get argv & environment */
347 	if (!(cpp = SCARG(uap, argp))) {
348 		error = EFAULT;
349 		goto bad;
350 	}
351 
352 	if (pack.ep_flags & EXEC_SKIPARG)
353 		cpp++;
354 
355 	while (1) {
356 		len = argp + ARG_MAX - dp;
357 		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
358 			goto bad;
359 		if (!sp)
360 			break;
361 		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
362 			if (error == ENAMETOOLONG)
363 				error = E2BIG;
364 			goto bad;
365 		}
366 		dp += len;
367 		cpp++;
368 		argc++;
369 	}
370 
371 	/* must have at least one argument */
372 	if (argc == 0) {
373 		error = EINVAL;
374 		goto bad;
375 	}
376 
377 #ifdef KTRACE
378 	if (KTRPOINT(p, KTR_EXECARGS))
379 		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
380 #endif
381 
382 	envc = 0;
383 	/* environment does not need to be there */
384 	if ((cpp = SCARG(uap, envp)) != NULL) {
385 #ifdef KTRACE
386 		env_start = dp;
387 #endif
388 		while (1) {
389 			len = argp + ARG_MAX - dp;
390 			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
391 				goto bad;
392 			if (!sp)
393 				break;
394 			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
395 				if (error == ENAMETOOLONG)
396 					error = E2BIG;
397 				goto bad;
398 			}
399 			dp += len;
400 			cpp++;
401 			envc++;
402 		}
403 
404 #ifdef KTRACE
405 		if (KTRPOINT(p, KTR_EXECENV))
406 			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
407 #endif
408 	}
409 
410 	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);
411 
412 	/*
413 	 * If we have enabled random stackgap, the stack itself has already
414 	 * been moved to a random location, but is still aligned to a page
415 	 * boundary.  Provide the lower bits of random placement now.
416 	 */
417 	if (stackgap_random == 0) {
418 		sgap = 0;
419 	} else {
420 		sgap = arc4random() & PAGE_MASK;
421 		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
422 	}
423 
424 	/* Now check if args & environ fit into new stack */
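	/*
	 * The total accounts for argc (one long), the argv and envp pointer
	 * arrays with their NULL terminators, the slots reserved for the ELF
	 * auxiliary vector, the strings themselves (dp - argp bytes), the
	 * random gap and the ps_strings structure.
	 */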
425 	len = ((argc + envc + 2 + ELF_AUX_WORDS) * sizeof(char *) +
426 	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;
427 
428 	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;
429 
430 	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
431 		error = ENOMEM;
432 		goto bad;
433 	}
434 
435 	/* adjust "active stack depth" for process VSZ */
436 	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */
437 
438 	/*
439 	 * we're committed: any further errors will kill the process, so
440 	 * kill the other threads now.
441 	 */
442 	single_thread_set(p, SINGLE_EXIT);
443 
444 	/*
445 	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
446 	 * ps_vmspace!
447 	 */
448 	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
449 
450 	vm = pr->ps_vmspace;
451 	/* Now map address space */
452 	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
453 	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
454 	    trunc_page(pack.ep_taddr));
455 	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
456 	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
457 	    trunc_page(pack.ep_daddr));
458 	vm->vm_dused = 0;
459 	vm->vm_ssize = atop(round_page(pack.ep_ssize));
460 	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
461 	vm->vm_minsaddr = (char *)pack.ep_minsaddr;
462 
463 	/* create the new process's VM space by running the vmcmds */
464 #ifdef DIAGNOSTIC
465 	if (pack.ep_vmcmds.evs_used == 0)
466 		panic("execve: no vmcmds");
467 #endif
468 	error = exec_process_vmcmds(p, &pack);
469 
470 	/* if an error happened, deallocate and punt */
471 	if (error)
472 		goto exec_abort;
473 
474 #ifdef MACHINE_STACK_GROWS_UP
475 	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
476 	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
477 	    trunc_page(pr->ps_strings), PROT_NONE, 0, TRUE, FALSE))
478 		goto exec_abort;
479 #else
480 	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
481 	if (uvm_map_protect(&vm->vm_map,
482 	    round_page(pr->ps_strings + sizeof(arginfo)),
483 	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, 0, TRUE, FALSE))
484 		goto exec_abort;
485 #endif
486 
487 	memset(&arginfo, 0, sizeof(arginfo));
488 
489 	/* remember information about the process */
490 	arginfo.ps_nargvstr = argc;
491 	arginfo.ps_nenvstr = envc;
492 
493 #ifdef MACHINE_STACK_GROWS_UP
494 	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
495 	slen = len - sizeof(arginfo) - sgap;
496 #else
497 	stack = (char *)(vm->vm_minsaddr - len);
498 #endif
499 	/* Now copy argc, args & environ to new stack */
500 	if (!copyargs(&pack, &arginfo, stack, argp))
501 		goto exec_abort;
502 
503 	pr->ps_auxinfo = (vaddr_t)pack.ep_auxinfo;
504 
505 	/* copy out the process's ps_strings structure */
506 	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
507 		goto exec_abort;
508 
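	/*
	 * Install the pinned system call table recorded while checking the
	 * new image, and discard any libc pin table inherited from the
	 * previous image.
	 */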
509 	free(pr->ps_pin.pn_pins, M_PINSYSCALL,
510 	    pr->ps_pin.pn_npins * sizeof(u_int));
511 	if (pack.ep_npins) {
512 		pr->ps_pin.pn_start = pack.ep_pinstart;
513 		pr->ps_pin.pn_end = pack.ep_pinend;
514 		pr->ps_pin.pn_pins = pack.ep_pins;
515 		pack.ep_pins = NULL;
516 		pr->ps_pin.pn_npins = pack.ep_npins;
517 		pr->ps_flags |= PS_PIN;
518 	} else {
519 		pr->ps_pin.pn_start = pr->ps_pin.pn_end = 0;
520 		pr->ps_pin.pn_pins = NULL;
521 		pr->ps_pin.pn_npins = 0;
522 		pr->ps_flags &= ~PS_PIN;
523 	}
524 	if (pr->ps_libcpin.pn_pins) {
525 		free(pr->ps_libcpin.pn_pins, M_PINSYSCALL,
526 		    pr->ps_libcpin.pn_npins * sizeof(u_int));
527 		pr->ps_libcpin.pn_start = pr->ps_libcpin.pn_end = 0;
528 		pr->ps_libcpin.pn_pins = NULL;
529 		pr->ps_libcpin.pn_npins = 0;
530 		pr->ps_flags &= ~PS_LIBCPIN;
531 	}
532 
533 	stopprofclock(pr);	/* stop profiling */
534 	fdcloseexec(p);		/* handle close on exec */
535 	execsigs(p);		/* reset caught signals */
536 	TCB_SET(p, NULL);	/* reset the TCB address */
537 	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
538 	pr->ps_kbind_cookie = 0;
539 	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);
540 
541 	/* set command name & other accounting info */
542 	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
543 	strlcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, sizeof(pr->ps_comm));
544 	pr->ps_acflag &= ~AFORK;
545 
546 	/* record proc's vnode, for use by sysctl */
547 	otvp = pr->ps_textvp;
548 	vref(pack.ep_vp);
549 	pr->ps_textvp = pack.ep_vp;
550 	if (otvp)
551 		vrele(otvp);
552 
553 	if (pack.ep_flags & EXEC_NOBTCFI)
554 		atomic_setbits_int(&p->p_p->ps_flags, PS_NOBTCFI);
555 	else
556 		atomic_clearbits_int(&p->p_p->ps_flags, PS_NOBTCFI);
557 
558 	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
559 	if (pr->ps_flags & PS_PPWAIT) {
560 		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
561 		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
562 		wakeup(pr->ps_pptr);
563 	}
564 
565 	/*
566 	 * If process does execve() while it has a mismatched real,
567 	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
568 	 */
569 	if (cred->cr_uid != cred->cr_ruid ||
570 	    cred->cr_uid != cred->cr_svuid ||
571 	    cred->cr_gid != cred->cr_rgid ||
572 	    cred->cr_gid != cred->cr_svgid)
573 		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
574 	else
575 		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);
576 
577 	if (pr->ps_flags & PS_EXECPLEDGE) {
578 		pr->ps_pledge = pr->ps_execpledge;
579 		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
580 	} else {
581 		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
582 		pr->ps_pledge = 0;
583 		/* XXX XXX XXX XXX */
584 		/* Clear our unveil paths out so the child
585 		 * starts afresh
586 		 */
587 		unveil_destroy(pr);
588 		pr->ps_uvdone = 0;
589 	}
590 
591 	/*
592 	 * deal with set[ug]id.
593 	 * MNT_NOEXEC has already been used to disable s[ug]id.
594 	 */
595 	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
596 		int i;
597 
598 		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);
599 
600 #ifdef KTRACE
601 		/*
602 		 * If the process is being ktraced, turn tracing off,
603 		 * unless root set it.
604 		 */
605 		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
606 			ktrcleartrace(pr);
607 #endif
608 		p->p_ucred = cred = crcopy(cred);
609 		if (attr.va_mode & VSUID)
610 			cred->cr_uid = attr.va_uid;
611 		if (attr.va_mode & VSGID)
612 			cred->cr_gid = attr.va_gid;
613 
614 		/*
615 		 * For set[ug]id processes, a few caveats apply to
616 		 * stdin, stdout, and stderr.
617 		 */
618 		error = 0;
619 		fdplock(p->p_fd);
620 		for (i = 0; i < 3; i++) {
621 			struct file *fp = NULL;
622 
623 			/*
624 			 * NOTE - This will never return NULL because of
625 			 * immature fds. The file descriptor table is not
626 			 * shared because we're suid.
627 			 */
628 			fp = fd_getfile(p->p_fd, i);
629 
630 			/*
631 			 * Ensure that stdin, stdout, and stderr are already
632 			 * allocated.  We do not want userland to accidentally
633 			 * allocate descriptors in this range which has implied
634 			 * meaning to libc.
635 			 */
636 			if (fp == NULL) {
637 				short flags = FREAD | (i == 0 ? 0 : FWRITE);
638 				struct vnode *vp;
639 				int indx;
640 
641 				if ((error = falloc(p, &fp, &indx)) != 0)
642 					break;
643 #ifdef DIAGNOSTIC
644 				if (indx != i)
645 					panic("sys_execve: falloc indx != i");
646 #endif
647 				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
648 					fdremove(p->p_fd, indx);
649 					closef(fp, p);
650 					break;
651 				}
652 				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
653 					fdremove(p->p_fd, indx);
654 					closef(fp, p);
655 					vrele(vp);
656 					break;
657 				}
658 				if (flags & FWRITE)
659 					vp->v_writecount++;
660 				fp->f_flag = flags;
661 				fp->f_type = DTYPE_VNODE;
662 				fp->f_ops = &vnops;
663 				fp->f_data = (caddr_t)vp;
664 				fdinsert(p->p_fd, indx, 0, fp);
665 			}
666 			FRELE(fp, p);
667 		}
668 		fdpunlock(p->p_fd);
669 		if (error)
670 			goto exec_abort;
671 	} else
672 		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);
673 
674 	/*
675 	 * Reset the saved ugids and update the process's copy of the
676 	 * creds if the creds have been changed
677 	 */
678 	if (cred->cr_uid != cred->cr_svuid ||
679 	    cred->cr_gid != cred->cr_svgid) {
680 		/* make sure we have unshared ucreds */
681 		p->p_ucred = cred = crcopy(cred);
682 		cred->cr_svuid = cred->cr_uid;
683 		cred->cr_svgid = cred->cr_gid;
684 	}
685 
686 	if (pr->ps_ucred != cred) {
687 		struct ucred *ocred;
688 
689 		ocred = pr->ps_ucred;
690 		crhold(cred);
691 		pr->ps_ucred = cred;
692 		crfree(ocred);
693 	}
694 
695 	if (pr->ps_flags & PS_SUGIDEXEC) {
696 		cancel_all_itimers();
697 	}
698 
699 	/* reset CPU time usage for the thread, but not the process */
700 	timespecclear(&p->p_tu.tu_runtime);
701 	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
702 
703 	memset(p->p_name, 0, sizeof p->p_name);
704 
705 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
706 
707 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
708 	vn_close(pack.ep_vp, FREAD, cred, p);
709 
710 	/*
711 	 * notify others that we exec'd
712 	 */
713 	knote_locked(&pr->ps_klist, NOTE_EXEC);
714 
715 	/* map the process's timekeep page; must happen before exec_elf_fixup */
716 	if (exec_timekeep_map(pr))
717 		goto free_pack_abort;
718 
719 	/* setup new registers and do misc. setup. */
720 	if (exec_elf_fixup(p, &pack) != 0)
721 		goto free_pack_abort;
722 #ifdef MACHINE_STACK_GROWS_UP
723 	setregs(p, &pack, (u_long)stack + slen, &arginfo);
724 #else
725 	setregs(p, &pack, (u_long)stack, &arginfo);
726 #endif
727 
728 	/* map the process's signal trampoline code */
729 	if (exec_sigcode_map(pr))
730 		goto free_pack_abort;
731 
732 #ifdef __HAVE_EXEC_MD_MAP
733 	/* perform md specific mappings that process might need */
734 	if (exec_md_map(p, &pack))
735 		goto free_pack_abort;
736 #endif
737 
738 	if (pr->ps_flags & PS_TRACED)
739 		psignal(p, SIGTRAP);
740 
741 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
742 
743 	p->p_descfd = 255;
744 	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
745 		p->p_descfd = pack.ep_fd;
746 
747 	if (pack.ep_flags & EXEC_WXNEEDED)
748 		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
749 	else
750 		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
751 
752 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
753 	single_thread_clear(p, P_SUSPSIG);
754 
755 	/* setregs() sets up all the registers, so just 'return' */
756 	return EJUSTRETURN;
757 
758 bad:
759 	/* free the vmspace-creation commands, and release their references */
760 	kill_vmcmds(&pack.ep_vmcmds);
761 	/* kill any opened file descriptor, if necessary */
762 	if (pack.ep_flags & EXEC_HASFD) {
763 		pack.ep_flags &= ~EXEC_HASFD;
764 		fdplock(p->p_fd);
765 		/* fdrelease unlocks p->p_fd. */
766 		(void) fdrelease(p, pack.ep_fd);
767 	}
768 	if (pack.ep_interp != NULL)
769 		pool_put(&namei_pool, pack.ep_interp);
770 	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
771 	free(pack.ep_pins, M_PINSYSCALL, pack.ep_npins * sizeof(u_int));
772 	/* close and put the exec'd file */
773 	vn_close(pack.ep_vp, FREAD, cred, p);
774 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
775 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
776 
777 freehdr:
778 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
779 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
780 	single_thread_clear(p, P_SUSPSIG);
781 
782 	return (error);
783 
784 exec_abort:
785 	/*
786 	 * The old process image doesn't exist anymore.  Exit gracefully:
787 	 * get rid of the (new) address space we have created, if any, get
788 	 * rid of our namei data and vnode, and exit noting failure.
789 	 */
790 	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
791 	if (pack.ep_interp != NULL)
792 		pool_put(&namei_pool, pack.ep_interp);
793 	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
794 	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
795 	vn_close(pack.ep_vp, FREAD, cred, p);
796 	km_free(argp, NCARGS, &kv_exec, &kp_pageable);
797 
798 free_pack_abort:
799 	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
800 	exit1(p, 0, SIGABRT, EXIT_NORMAL);
801 
802 	/* NOTREACHED */
803 	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
804 
805 	return (0);
806 }
807 
808 
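/*
 * Build the initial stack image for the new process.  Starting at "stack"
 * the layout is: argc (one long), the argv pointer array and its NULL
 * terminator, the envp pointer array and its NULL terminator, ELF_AUX_WORDS
 * pointer-sized slots left for the auxiliary vector (ep_auxinfo records
 * their location when the binary needs auxinfo), and finally the argument
 * and environment strings copied from argp.  Returns 1 on success, 0 if a
 * copyout fails.
 */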
809 int
810 copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
811     void *argp)
812 {
813 	char **cpp = stack;
814 	char *dp, *sp;
815 	size_t len;
816 	void *nullp = NULL;
817 	long argc = arginfo->ps_nargvstr;
818 	int envc = arginfo->ps_nenvstr;
819 
820 	if (copyout(&argc, cpp++, sizeof(argc)))
821 		return (0);
822 
823 	dp = (char *) (cpp + argc + envc + 2 + ELF_AUX_WORDS);
824 	sp = argp;
825 
826 	/* XXX don't copy them out, remap them! */
827 	arginfo->ps_argvstr = cpp; /* remember location of argv for later */
828 
829 	for (; --argc >= 0; sp += len, dp += len)
830 		if (copyout(&dp, cpp++, sizeof(dp)) ||
831 		    copyoutstr(sp, dp, ARG_MAX, &len))
832 			return (0);
833 
834 	if (copyout(&nullp, cpp++, sizeof(nullp)))
835 		return (0);
836 
837 	arginfo->ps_envstr = cpp; /* remember location of envp for later */
838 
839 	for (; --envc >= 0; sp += len, dp += len)
840 		if (copyout(&dp, cpp++, sizeof(dp)) ||
841 		    copyoutstr(sp, dp, ARG_MAX, &len))
842 			return (0);
843 
844 	if (copyout(&nullp, cpp++, sizeof(nullp)))
845 		return (0);
846 
847 	/* if this process needs auxinfo, note where to place it */
848 	if (pack->ep_args != NULL)
849 		pack->ep_auxinfo = cpp;
850 
851 	return (1);
852 }
853 
854 int
855 exec_sigcode_map(struct process *pr)
856 {
857 	extern char sigcode[], esigcode[], sigcoderet[];
858 	vsize_t sz;
859 
860 	sz = (vaddr_t)esigcode - (vaddr_t)sigcode;
861 
862 	/*
863 	 * If we don't have a sigobject yet, create one.
864 	 *
865 	 * sigobject is an anonymous memory object (just like SYSV shared
866 	 * memory) that we keep a permanent reference to and that we map
867 	 * in all processes that need this sigcode.  The creation is simple:
868 	 * we create an object, add a permanent reference to it, map it into
869 	 * kernel space, copy the sigcode into it and make the kernel mapping
870 	 * read-only.  Then we map it with PROT_EXEC into the process as sys_mmap would.
871 	 */
872 	if (sigobject == NULL) {
873 		extern int sigfillsiz;
874 		extern u_char sigfill[];
875 		size_t off, left;
876 		vaddr_t va;
877 		int r;
878 
879 		sigobject = uao_create(sz, 0);
880 		uao_reference(sigobject);	/* permanent reference */
881 
882 		if ((r = uvm_map(kernel_map, &va, round_page(sz), sigobject,
883 		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
884 		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
885 			uao_detach(sigobject);
886 			return (ENOMEM);
887 		}
888 
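		/*
		 * Fill the whole object with the architecture's sigfill
		 * pattern first, then copy the real sigcode over the start,
		 * so the padding beyond the sigcode is well defined.
		 */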
889 		for (off = 0, left = round_page(sz); left != 0;
890 		    off += sigfillsiz) {
891 			size_t chunk = ulmin(left, sigfillsiz);
892 			memcpy((caddr_t)va + off, sigfill, chunk);
893 			left -= chunk;
894 		}
895 		memcpy((caddr_t)va, sigcode, sz);
896 
897 		(void) uvm_map_protect(kernel_map, va, round_page(sz),
898 		    PROT_READ, 0, FALSE, FALSE);
899 		sigcode_va = va;
900 		sigcode_sz = round_page(sz);
901 	}
902 
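	/*
	 * Map the object into the process with PROT_EXEC, mark it as a
	 * region system calls may be made from (UVM_FLAG_SYSCALL) and make
	 * the mapping immutable so its protection cannot be changed later.
	 */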
903 	pr->ps_sigcode = 0; /* no hint */
904 	uao_reference(sigobject);
905 	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
906 	    sigobject, 0, 0, UVM_MAPFLAG(PROT_EXEC,
907 	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
908 	    MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) {
909 		uao_detach(sigobject);
910 		return (ENOMEM);
911 	}
912 	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_sigcode,
913 	    pr->ps_sigcode + round_page(sz), 1);
914 
915 	/* Calculate PC at point of sigreturn entry */
916 	pr->ps_sigcoderet = pr->ps_sigcode + (sigcoderet - sigcode);
917 
918 	return (0);
919 }
920 
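/*
 * Map the shared timekeep page read-only into the new process; libc uses
 * it to service clock_gettime(2) and gettimeofday(2) from userland
 * timecounters without entering the kernel.
 */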
921 int
922 exec_timekeep_map(struct process *pr)
923 {
924 	size_t timekeep_sz = round_page(sizeof(struct timekeep));
925 
926 	/*
927 	 * Similar to the sigcode object
928 	 */
929 	if (timekeep_object == NULL) {
930 		vaddr_t va = 0;
931 
932 		timekeep_object = uao_create(timekeep_sz, 0);
933 		uao_reference(timekeep_object);
934 
935 		if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object,
936 		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
937 		    MAP_INHERIT_SHARE, MADV_RANDOM, 0))) {
938 			uao_detach(timekeep_object);
939 			timekeep_object = NULL;
940 			return (ENOMEM);
941 		}
942 		if (uvm_fault_wire(kernel_map, va, va + timekeep_sz,
943 		    PROT_READ | PROT_WRITE)) {
944 			uvm_unmap(kernel_map, va, va + timekeep_sz);
945 			uao_detach(timekeep_object);
946 			timekeep_object = NULL;
947 			return (ENOMEM);
948 		}
949 
950 		timekeep = (struct timekeep *)va;
951 		timekeep->tk_version = TK_VERSION;
952 	}
953 
954 	pr->ps_timekeep = 0; /* no hint */
955 	uao_reference(timekeep_object);
956 	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz,
957 	    timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ,
958 	    MAP_INHERIT_COPY, MADV_RANDOM, 0))) {
959 		uao_detach(timekeep_object);
960 		return (ENOMEM);
961 	}
962 	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_timekeep,
963 	    pr->ps_timekeep + timekeep_sz, 1);
964 
965 	return (0);
966 }
967