xref: /netbsd-src/sys/kern/kern_exec.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: kern_exec.c,v 1.169 2003/06/29 22:31:19 fvdl Exp $	*/
2 
3 /*-
4  * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
5  * Copyright (C) 1992 Wolfgang Solfrank.
6  * Copyright (C) 1992 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.169 2003/06/29 22:31:19 fvdl Exp $");
37 
38 #include "opt_ktrace.h"
39 #include "opt_syscall_debug.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/filedesc.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/mount.h>
47 #include <sys/malloc.h>
48 #include <sys/namei.h>
49 #include <sys/vnode.h>
50 #include <sys/file.h>
51 #include <sys/acct.h>
52 #include <sys/exec.h>
53 #include <sys/ktrace.h>
54 #include <sys/resourcevar.h>
55 #include <sys/wait.h>
56 #include <sys/mman.h>
57 #include <sys/ras.h>
58 #include <sys/signalvar.h>
59 #include <sys/stat.h>
60 #include <sys/syscall.h>
61 
62 #include <sys/sa.h>
63 #include <sys/savar.h>
64 #include <sys/syscallargs.h>
65 
66 #include <uvm/uvm_extern.h>
67 
68 #include <machine/cpu.h>
69 #include <machine/reg.h>
70 
71 #ifdef DEBUG_EXEC
72 #define DPRINTF(a) uprintf a
73 #else
74 #define DPRINTF(a)
75 #endif /* DEBUG_EXEC */
76 
77 MALLOC_DEFINE(M_EXEC, "exec", "argument lists & other mem used by exec");
78 
79 /*
80  * Exec function switch:
81  *
82  * Note that each makecmds function is responsible for loading the
83  * exec package with the necessary functions for any exec-type-specific
84  * handling.
85  *
86  * Functions for specific exec types should be defined in their own
87  * header file.
88  */
89 extern const struct execsw	execsw_builtin[];
90 extern int			nexecs_builtin;
91 static const struct execsw	**execsw = NULL;
92 static int			nexecs;
93 
94 u_int	exec_maxhdrsz;		/* must not be static - netbsd32 needs it */
95 
96 #ifdef LKM
97 /* list of supported emulations */
98 static
99 LIST_HEAD(emlist_head, emul_entry) el_head = LIST_HEAD_INITIALIZER(el_head);
100 struct emul_entry {
101 	LIST_ENTRY(emul_entry)	el_list;
102 	const struct emul	*el_emul;
103 	int			ro_entry;
104 };
105 
106 /* list of dynamically loaded execsw entries */
107 static
108 LIST_HEAD(execlist_head, exec_entry) ex_head = LIST_HEAD_INITIALIZER(ex_head);
109 struct exec_entry {
110 	LIST_ENTRY(exec_entry)	ex_list;
111 	const struct execsw	*es;
112 };
113 
114 /* structure used for building execsw[] */
115 struct execsw_entry {
116 	struct execsw_entry	*next;
117 	const struct execsw	*es;
118 };
119 #endif /* LKM */
120 
121 /* NetBSD emul struct */
122 extern char	sigcode[], esigcode[];
123 #ifdef SYSCALL_DEBUG
124 extern const char * const syscallnames[];
125 #endif
126 #ifdef __HAVE_SYSCALL_INTERN
127 void syscall_intern(struct proc *);
128 #else
129 void syscall(void);
130 #endif
131 
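/*
 * The native NetBSD emulation description: the native syscall table,
 * native signal delivery and on-stack signal trampoline (sigcode), and
 * the machine-dependent setregs().  Native binaries need no emulation
 * root path.
 */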
132 const struct emul emul_netbsd = {
133 	"netbsd",
134 	NULL,		/* emulation path */
135 #ifndef __HAVE_MINIMAL_EMUL
136 	EMUL_HAS_SYS___syscall,
137 	NULL,
138 	SYS_syscall,
139 	SYS_NSYSENT,
140 #endif
141 	sysent,
142 #ifdef SYSCALL_DEBUG
143 	syscallnames,
144 #else
145 	NULL,
146 #endif
147 	sendsig,
148 	trapsignal,
149 	sigcode,
150 	esigcode,
151 	setregs,
152 	NULL,
153 	NULL,
154 	NULL,
155 #ifdef __HAVE_SYSCALL_INTERN
156 	syscall_intern,
157 #else
158 	syscall,
159 #endif
160 	NULL,
161 	NULL,
162 };
163 
164 #ifdef LKM
165 /*
166  * Exec lock. Used to control access to execsw[] structures.
167  * This must not be static so that netbsd32 can access it, too.
168  */
169 struct lock exec_lock;
170 
171 static void link_es(struct execsw_entry **, const struct execsw *);
172 #endif /* LKM */
173 
174 /*
175  * check exec:
176  * given an "executable" described in the exec package's namei info,
177  * see what we can do with it.
178  *
179  * ON ENTRY:
180  *	exec package with appropriate namei info
181  *	proc pointer of exec'ing proc
182  *	if verified exec is enabled, a flag indicating whether this is a
183  *	  direct exec or an indirect exec (i.e. for a shell script interpreter)
184  *	NO SELF-LOCKED VNODES
185  *
186  * ON EXIT:
187  *	error:	nothing held, etc.  exec header still allocated.
188  *	ok:	filled exec package, executable's vnode (unlocked).
189  *
190  * EXEC SWITCH ENTRY:
191  * 	Locked vnode to check, exec package, proc.
192  *
193  * EXEC SWITCH EXIT:
194  *	ok:	return 0, filled exec package, executable's vnode (unlocked).
195  *	error:	destructive:
196  *			everything deallocated except the exec header.
197  *		non-destructive:
198  *			error code, executable's vnode (unlocked),
199  *			exec header unmodified.
200  */
201 int
202 #ifdef VERIFIED_EXEC
203 check_exec(struct proc *p, struct exec_package *epp, int direct_exec)
204 #else
205 check_exec(struct proc *p, struct exec_package *epp)
206 #endif
207 {
208 	int		error, i;
209 	struct vnode	*vp;
210 	struct nameidata *ndp;
211 	size_t		resid;
212 
213 	ndp = epp->ep_ndp;
214 	ndp->ni_cnd.cn_nameiop = LOOKUP;
215 	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
216 	/* first get the vnode */
217 	if ((error = namei(ndp)) != 0)
218 		return error;
219 	epp->ep_vp = vp = ndp->ni_vp;
220 
221 	/* check access and type */
222 	if (vp->v_type != VREG) {
223 		error = EACCES;
224 		goto bad1;
225 	}
226 	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
227 		goto bad1;
228 
229 	/* get attributes */
230 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
231 		goto bad1;
232 
233 	/* Check mount point */
234 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
235 		error = EACCES;
236 		goto bad1;
237 	}
238 	if (vp->v_mount->mnt_flag & MNT_NOSUID)
239 		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);
240 
241 	/* try to open it */
242 	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
243 		goto bad1;
244 
245 	/* unlock vp, since we need it unlocked from here on out. */
246 	VOP_UNLOCK(vp, 0);
247 
248 
249 #ifdef VERIFIED_EXEC
250 	/* Evaluate signature for file... */
251 	if ((error = check_veriexec(p, vp, epp, direct_exec)) != 0)
252 		goto bad2;
253 #endif
254 
255 	/* now we have the file, get the exec header */
256 	uvn_attach(vp, VM_PROT_READ);
257 	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
258 			UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
259 	if (error)
260 		goto bad2;
261 	epp->ep_hdrvalid = epp->ep_hdrlen - resid;
262 
263 	/*
264 	 * Set up default address space limits.  Can be overridden
265 	 * by individual exec packages.
266 	 *
267 	 * XXX probably should be all done in the exec packages.
268 	 */
269 	epp->ep_vm_minaddr = VM_MIN_ADDRESS;
270 	epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS;
271 	/*
272 	 * set up the vmcmds for creation of the process
273 	 * address space
274 	 */
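	/*
	 * Try each known exec format in turn.  ENOEXEC from an es_check
	 * routine just means "not my format", so the first more specific
	 * error code is the one remembered and reported.  If a check
	 * routine fails destructively (EXEC_DESTR), it has already torn
	 * down our state, so bail out immediately.
	 */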
275 	error = ENOEXEC;
276 	for (i = 0; i < nexecs && error != 0; i++) {
277 		int newerror;
278 
279 		epp->ep_esch = execsw[i];
280 		newerror = (*execsw[i]->es_check)(p, epp);
281 		/* make sure the first "interesting" error code is saved. */
282 		if (!newerror || error == ENOEXEC)
283 			error = newerror;
284 
285 		/* if es_check call was successful, update epp->ep_es */
286 		if (!newerror && (epp->ep_flags & EXEC_HASES) == 0)
287 			epp->ep_es = execsw[i];
288 
289 		if (epp->ep_flags & EXEC_DESTR && error != 0)
290 			return error;
291 	}
292 	if (!error) {
293 		/* check that entry point is sane */
294 		if (epp->ep_entry > VM_MAXUSER_ADDRESS)
295 			error = ENOEXEC;
296 
297 		/* check limits */
298 		if ((epp->ep_tsize > MAXTSIZ) ||
299 		    (epp->ep_dsize >
300 		     (u_quad_t)p->p_rlimit[RLIMIT_DATA].rlim_cur))
301 			error = ENOMEM;
302 
303 		if (!error)
304 			return (0);
305 	}
306 
307 	/*
308 	 * free any vmspace-creation commands,
309 	 * and release their references
310 	 */
311 	kill_vmcmds(&epp->ep_vmcmds);
312 
313 bad2:
314 	/*
315 	 * close and release the vnode, restore the old one, free the
316 	 * pathname buf, and punt.
317 	 */
318 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
319 	VOP_CLOSE(vp, FREAD, p->p_ucred, p);
320 	vput(vp);
321 	PNBUF_PUT(ndp->ni_cnd.cn_pnbuf);
322 	return error;
323 
324 bad1:
325 	/*
326 	 * free the namei pathname buffer, and put the vnode
327 	 * (which we don't yet have open).
328 	 */
329 	vput(vp);				/* was still locked */
330 	PNBUF_PUT(ndp->ni_cnd.cn_pnbuf);
331 	return error;
332 }
333 
334 /*
335  * exec system call
336  */
337 /* ARGSUSED */
338 int
339 sys_execve(struct lwp *l, void *v, register_t *retval)
340 {
341 	struct sys_execve_args /* {
342 		syscallarg(const char *)	path;
343 		syscallarg(char * const *)	argp;
344 		syscallarg(char * const *)	envp;
345 	} */ *uap = v;
346 	int			error;
347 	u_int			i;
348 	struct exec_package	pack;
349 	struct nameidata	nid;
350 	struct vattr		attr;
351 	struct proc		*p;
352 	struct ucred		*cred;
353 	char			*argp;
354 	char * const		*cpp;
355 	char			*dp, *sp;
356 	long			argc, envc;
357 	size_t			len;
358 	char			*stack;
359 	struct ps_strings	arginfo;
360 	struct vmspace		*vm;
361 	char			**tmpfap;
362 	int			szsigcode;
363 	struct exec_vmcmd	*base_vcp;
364 	int			oldlwpflags;
365 
366 	/* Disable scheduler activation upcalls. */
367 	oldlwpflags = l->l_flag & (L_SA | L_SA_UPCALL);
368 	if (l->l_flag & L_SA)
369 		l->l_flag &= ~(L_SA | L_SA_UPCALL);
370 
371 	p = l->l_proc;
372 	/*
373 	 * Lock the process and set the P_INEXEC flag to indicate that
374 	 * it should be left alone until we're done here.  This is
375 	 * necessary to avoid race conditions - e.g. in ptrace() -
376 	 * that might allow a local user to illicitly obtain elevated
377 	 * privileges.
378 	 */
379 	p->p_flag |= P_INEXEC;
380 
381 	cred = p->p_ucred;
382 	base_vcp = NULL;
383 	/*
384 	 * Init the namei data to point at the user's program name.
385 	 * This is done here rather than in check_exec(), so that it's
386 	 * possible to override these settings if any of the makecmd/probe
387 	 * functions call check_exec() recursively - for example,
388 	 * see exec_script_makecmds().
389 	 */
390 	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
391 
392 	/*
393 	 * initialize the fields of the exec package.
394 	 */
395 	pack.ep_name = SCARG(uap, path);
396 	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
397 	pack.ep_hdrlen = exec_maxhdrsz;
398 	pack.ep_hdrvalid = 0;
399 	pack.ep_ndp = &nid;
400 	pack.ep_emul_arg = NULL;
401 	pack.ep_vmcmds.evs_cnt = 0;
402 	pack.ep_vmcmds.evs_used = 0;
403 	pack.ep_vap = &attr;
404 	pack.ep_flags = 0;
405 
406 #ifdef LKM
407 	lockmgr(&exec_lock, LK_SHARED, NULL);
408 #endif
409 
410 	/* see if we can run it. */
411 #ifdef VERIFIED_EXEC
412 	if ((error = check_exec(p, &pack, 1)) != 0)
413 	/* if ((error = check_exec(p, &pack, 0)) != 0) */
414 #else
415 	if ((error = check_exec(p, &pack)) != 0)
416 #endif
417 		goto freehdr;
418 
419 	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */
420 
421 	/* allocate an argument buffer */
422 	argp = (char *) uvm_km_valloc_wait(exec_map, NCARGS);
423 #ifdef DIAGNOSTIC
424 	if (argp == NULL)
425 		panic("execve: argp == NULL");
426 #endif
427 	dp = argp;
428 	argc = 0;
429 
430 	/* copy the fake args list, if there's one, freeing it as we go */
431 	if (pack.ep_flags & EXEC_HASARGL) {
432 		tmpfap = pack.ep_fa;
433 		while (*tmpfap != NULL) {
434 			char *cp;
435 
436 			cp = *tmpfap;
437 			while (*cp)
438 				*dp++ = *cp++;
439 			dp++;
440 
441 			FREE(*tmpfap, M_EXEC);
442 			tmpfap++; argc++;
443 		}
444 		FREE(pack.ep_fa, M_EXEC);
445 		pack.ep_flags &= ~EXEC_HASARGL;
446 	}
447 
448 	/* Now get argv & environment */
449 	if (!(cpp = SCARG(uap, argp))) {
450 		error = EINVAL;
451 		goto bad;
452 	}
453 
454 	if (pack.ep_flags & EXEC_SKIPARG)
455 		cpp++;
456 
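	/*
	 * Copy the argument strings in from user space one at a time:
	 * fetch the next pointer from the argv array, then copy the
	 * NUL-terminated string itself into the argument buffer.  "len"
	 * is the space left in the buffer, so an overly long argument
	 * list fails with E2BIG.
	 */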
457 	while (1) {
458 		len = argp + ARG_MAX - dp;
459 		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
460 			goto bad;
461 		if (!sp)
462 			break;
463 		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
464 			if (error == ENAMETOOLONG)
465 				error = E2BIG;
466 			goto bad;
467 		}
468 		dp += len;
469 		cpp++;
470 		argc++;
471 	}
472 
473 	envc = 0;
474 	/* environment need not be there */
475 	if ((cpp = SCARG(uap, envp)) != NULL ) {
476 		while (1) {
477 			len = argp + ARG_MAX - dp;
478 			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
479 				goto bad;
480 			if (!sp)
481 				break;
482 			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
483 				if (error == ENAMETOOLONG)
484 					error = E2BIG;
485 				goto bad;
486 			}
487 			dp += len;
488 			cpp++;
489 			envc++;
490 		}
491 	}
492 
493 	dp = (char *) ALIGN(dp);
494 
495 	szsigcode = pack.ep_es->es_emul->e_esigcode -
496 	    pack.ep_es->es_emul->e_sigcode;
497 
498 	/* Now check if args & environ fit into new stack */
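	/*
	 * The space needed is: the argv and envp pointer arrays (argc +
	 * envc slots, their two NULL terminators and any extra
	 * emulation-specific slots, es_arglen), argc itself, the strings
	 * accumulated in the argument buffer (dp - argp), the stack gap,
	 * the signal trampoline and the ps_strings structure.  Pointer
	 * slots are int-sized for EXEC_32 binaries, pointer-sized
	 * otherwise.
	 */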
499 	if (pack.ep_flags & EXEC_32)
500 		len = ((argc + envc + 2 + pack.ep_es->es_arglen) *
501 		    sizeof(int) + sizeof(int) + dp + STACKGAPLEN +
502 		    szsigcode + sizeof(struct ps_strings)) - argp;
503 	else
504 		len = ((argc + envc + 2 + pack.ep_es->es_arglen) *
505 		    sizeof(char *) + sizeof(int) + dp + STACKGAPLEN +
506 		    szsigcode + sizeof(struct ps_strings)) - argp;
507 
508 	len = ALIGN(len);	/* make the stack "safely" aligned */
509 
510 	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
511 		error = ENOMEM;
512 		goto bad;
513 	}
514 
515 	/* Get rid of other LWPs. */
516 	p->p_flag |= P_WEXIT; /* XXX hack. lwp-exit stuff wants to see it. */
517 	exit_lwps(l);
518 	p->p_flag &= ~P_WEXIT;
519 	KDASSERT(p->p_nlwps == 1);
520 
521 	/* This is now LWP 1 */
522 	l->l_lid = 1;
523 	p->p_nlwpid = 1;
524 
525 	/* Release any SA state. */
526 	if (p->p_sa) {
527 		p->p_flag &= ~P_SA;
528 		free(p->p_sa->sa_stacks, M_SA);
529 		pool_put(&sadata_pool, p->p_sa);
530 		p->p_sa = NULL;
531 	}
532 
533 	/* Remove POSIX timers */
534 	timers_free(p, TIMERS_POSIX);
535 
536 	/* adjust "active stack depth" for process VSZ */
537 	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */
538 
539 	/*
540 	 * Do whatever is necessary to prepare the address space
541 	 * for remapping.  Note that this might replace the current
542 	 * vmspace with another!
543 	 */
544 	uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr);
545 
546 	/* Now map address space */
547 	vm = p->p_vmspace;
548 	vm->vm_taddr = (caddr_t) pack.ep_taddr;
549 	vm->vm_tsize = btoc(pack.ep_tsize);
550 	vm->vm_daddr = (caddr_t) pack.ep_daddr;
551 	vm->vm_dsize = btoc(pack.ep_dsize);
552 	vm->vm_ssize = btoc(pack.ep_ssize);
553 	vm->vm_maxsaddr = (caddr_t) pack.ep_maxsaddr;
554 	vm->vm_minsaddr = (caddr_t) pack.ep_minsaddr;
555 
556 	/* create the new process's VM space by running the vmcmds */
557 #ifdef DIAGNOSTIC
558 	if (pack.ep_vmcmds.evs_used == 0)
559 		panic("execve: no vmcmds");
560 #endif
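	/*
	 * Run each vmcmd in order, stopping at the first failure.  A
	 * command flagged VMCMD_BASE has its address remembered in
	 * base_vcp so that later VMCMD_RELATIVE commands can be placed
	 * relative to it.
	 */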
561 	for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) {
562 		struct exec_vmcmd *vcp;
563 
564 		vcp = &pack.ep_vmcmds.evs_cmds[i];
565 		if (vcp->ev_flags & VMCMD_RELATIVE) {
566 #ifdef DIAGNOSTIC
567 			if (base_vcp == NULL)
568 				panic("execve: relative vmcmd with no base");
569 			if (vcp->ev_flags & VMCMD_BASE)
570 				panic("execve: illegal base & relative vmcmd");
571 #endif
572 			vcp->ev_addr += base_vcp->ev_addr;
573 		}
574 		error = (*vcp->ev_proc)(p, vcp);
575 #ifdef DEBUG_EXEC
576 		if (error) {
577 			int j;
578 			struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0];
579 			for (j = 0; j <= i; j++)
580 				uprintf(
581 			    "vmcmd[%d] = %#lx/%#lx fd@%#lx prot=0%o flags=%d\n",
582 				    j, vp[j].ev_addr, vp[j].ev_len,
583 				    vp[j].ev_offset, vp[j].ev_prot,
584 				    vp[j].ev_flags);
585 		}
586 #endif /* DEBUG_EXEC */
587 		if (vcp->ev_flags & VMCMD_BASE)
588 			base_vcp = vcp;
589 	}
590 
591 	/* free the vmspace-creation commands, and release their references */
592 	kill_vmcmds(&pack.ep_vmcmds);
593 
594 	/* if an error happened, deallocate and punt */
595 	if (error) {
596 		DPRINTF(("execve: vmcmd %i failed: %d\n", i - 1, error));
597 		goto exec_abort;
598 	}
599 
600 	/* remember information about the process */
601 	arginfo.ps_nargvstr = argc;
602 	arginfo.ps_nenvstr = envc;
603 
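	/*
	 * Compute the address in the new stack where the argument and
	 * environment data will be copied, leaving room at vm_minsaddr
	 * for the ps_strings structure and the signal trampoline.
	 */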
604 	stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr,
605 		sizeof(struct ps_strings) + szsigcode),
606 		len - (sizeof(struct ps_strings) + szsigcode));
607 #ifdef __MACHINE_STACK_GROWS_UP
608 	/*
609 	 * The copyargs call always copies into lower addresses
610 	 * first, moving towards higher addresses, starting with
611 	 * the stack pointer that we give.  When the stack grows
612 	 * down, this puts argc/argv/envp very shallow on the
613 	 * stack, right at the first user stack pointer, and puts
614 	 * STACKGAPLEN very deep in the stack.  When the stack
615 	 * grows up, the situation is reversed.
616 	 *
617 	 * Normally, this is no big deal.  But the ld_elf.so _rtld()
618 	 * function expects to be called with a single pointer to
619 	 * a region that has a few words it can stash values into,
620 	 * followed by argc/argv/envp.  When the stack grows down,
621 	 * it's easy to decrement the stack pointer a little bit to
622 	 * allocate the space for these few words and pass the new
623 	 * stack pointer to _rtld.  When the stack grows up, however,
624 	 * a few words before argc is part of the signal trampoline,
625 	 * so we have a problem.
626 	 *
627 	 * Instead of changing how _rtld works, we take the easy way
628 	 * out and steal 32 bytes before we call copyargs.  This
629 	 * space is effectively stolen from STACKGAPLEN.
630 	 */
631 	stack += 32;
632 #endif /* __MACHINE_STACK_GROWS_UP */
633 
634 	/* Now copy argc, args & environ to new stack */
635 	error = (*pack.ep_es->es_copyargs)(p, &pack, &arginfo, &stack, argp);
636 	if (error) {
637 		DPRINTF(("execve: copyargs failed %d\n", error));
638 		goto exec_abort;
639 	}
640 	/* Move the stack back to original point */
641 	stack = (char *)STACK_GROW(vm->vm_minsaddr, len);
642 
643 	/* fill process ps_strings info */
644 	p->p_psstr = (struct ps_strings *)STACK_ALLOC(vm->vm_minsaddr,
645 	    sizeof(struct ps_strings));
646 	p->p_psargv = offsetof(struct ps_strings, ps_argvstr);
647 	p->p_psnargv = offsetof(struct ps_strings, ps_nargvstr);
648 	p->p_psenv = offsetof(struct ps_strings, ps_envstr);
649 	p->p_psnenv = offsetof(struct ps_strings, ps_nenvstr);
650 
651 	/* copy out the process's ps_strings structure */
652 	if ((error = copyout(&arginfo, (char *)p->p_psstr,
653 	    sizeof(arginfo))) != 0) {
654 		DPRINTF(("execve: ps_strings copyout %p->%p size %ld failed\n",
655 		       &arginfo, (char *)p->p_psstr, (long)sizeof(arginfo)));
656 		goto exec_abort;
657 	}
658 
659 	/* copy out the process's signal trampoline code */
660 	if (szsigcode) {
661 		p->p_sigctx.ps_sigcode = STACK_ALLOC(STACK_MAX(p->p_psstr,
662 		    sizeof(struct ps_strings)), szsigcode);
663 		if ((error = copyout((char *)pack.ep_es->es_emul->e_sigcode,
664 		    p->p_sigctx.ps_sigcode, szsigcode)) != 0) {
665 			DPRINTF(("execve: sig trampoline copyout failed\n"));
666 			goto exec_abort;
667 		}
668 #ifdef PMAP_NEED_PROCWR
669 		/* This is code. Let the pmap do what is needed. */
670 		pmap_procwr(p, (vaddr_t)p->p_sigctx.ps_sigcode, szsigcode);
671 #endif
672 	}
673 
674 	stopprofclock(p);	/* stop profiling */
675 	fdcloseexec(p);		/* handle close on exec */
676 	execsigs(p);		/* reset caught signals */
677 
678 	l->l_ctxlink = NULL;	/* reset ucontext link */
679 
680 	/* set command name & other accounting info */
681 	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
682 	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
683 	p->p_comm[len] = 0;
684 	p->p_acflag &= ~AFORK;
685 
686 	/* record proc's vnode, for use by procfs and others */
687 	if (p->p_textvp)
688 		vrele(p->p_textvp);
689 	VREF(pack.ep_vp);
690 	p->p_textvp = pack.ep_vp;
691 
692 	p->p_flag |= P_EXEC;
693 	if (p->p_flag & P_PPWAIT) {
694 		p->p_flag &= ~P_PPWAIT;
695 		wakeup((caddr_t) p->p_pptr);
696 	}
697 
698 	/*
699 	 * deal with set[ug]id.
700 	 * MNT_NOSUID has already been used to disable s[ug]id.
701 	 */
702 	if ((p->p_flag & P_TRACED) == 0 &&
703 
704 	    (((attr.va_mode & S_ISUID) != 0 &&
705 	      p->p_ucred->cr_uid != attr.va_uid) ||
706 
707 	     ((attr.va_mode & S_ISGID) != 0 &&
708 	      p->p_ucred->cr_gid != attr.va_gid))) {
709 		/*
710 		 * Mark the process as SUGID before we do
711 		 * anything that might block.
712 		 */
713 		p_sugid(p);
714 
715 		/* Make sure file descriptors 0..2 are in use. */
716 		if ((error = fdcheckstd(p)) != 0)
717 			goto exec_abort;
718 
719 		p->p_ucred = crcopy(cred);
720 #ifdef KTRACE
721 		/*
722 		 * If process is being ktraced, turn off - unless
723 		 * root set it.
724 		 */
725 		if (p->p_tracep && !(p->p_traceflag & KTRFAC_ROOT))
726 			ktrderef(p);
727 #endif
728 		if (attr.va_mode & S_ISUID)
729 			p->p_ucred->cr_uid = attr.va_uid;
730 		if (attr.va_mode & S_ISGID)
731 			p->p_ucred->cr_gid = attr.va_gid;
732 	} else
733 		p->p_flag &= ~P_SUGID;
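	/* Update the saved user and group IDs to the new effective IDs. */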
734 	p->p_cred->p_svuid = p->p_ucred->cr_uid;
735 	p->p_cred->p_svgid = p->p_ucred->cr_gid;
736 
737 #if defined(__HAVE_RAS)
738 	/*
739 	 * Remove all RASs from the address space.
740 	 */
741 	ras_purgeall(p);
742 #endif
743 
744 	doexechooks(p);
745 
746 	uvm_km_free_wakeup(exec_map, (vaddr_t) argp, NCARGS);
747 
748 	PNBUF_PUT(nid.ni_cnd.cn_pnbuf);
749 	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
750 	VOP_CLOSE(pack.ep_vp, FREAD, cred, p);
751 	vput(pack.ep_vp);
752 
753 	/* notify others that we exec'd */
754 	KNOTE(&p->p_klist, NOTE_EXEC);
755 
756 	/* setup new registers and do misc. setup. */
757 	(*pack.ep_es->es_emul->e_setregs)(l, &pack, (u_long) stack);
758 	if (pack.ep_es->es_setregs)
759 		(*pack.ep_es->es_setregs)(l, &pack, (u_long) stack);
760 
761 	if (p->p_flag & P_TRACED)
762 		psignal(p, SIGTRAP);
763 
764 	free(pack.ep_hdr, M_EXEC);
765 
766 	/*
767 	 * Call the emulation-specific exec hook. This can set up per-process
768 	 * p->p_emuldata or do any other per-process stuff an emulation needs.
769 	 *
770 	 * If we are executing a process of a different emulation than the
771 	 * original forked process, call e_proc_exit() of the old emulation
772 	 * first, then e_proc_exec() of the new emulation. If the emulation is
773 	 * the same, the exec hook code should deallocate any old emulation
774 	 * resources held previously by this process.
775 	 */
776 	if (p->p_emul && p->p_emul->e_proc_exit
777 	    && p->p_emul != pack.ep_es->es_emul)
778 		(*p->p_emul->e_proc_exit)(p);
779 
780 	/*
781 	 * Call exec hook. Emulation code may NOT store reference to anything
782 	 * from &pack.
783 	 */
784 	if (pack.ep_es->es_emul->e_proc_exec)
785 		(*pack.ep_es->es_emul->e_proc_exec)(p, &pack);
786 
787 	/* update p_emul, the old value is no longer needed */
788 	p->p_emul = pack.ep_es->es_emul;
789 
790 	/* ...and the same for p_execsw */
791 	p->p_execsw = pack.ep_es;
792 
793 #ifdef __HAVE_SYSCALL_INTERN
794 	(*p->p_emul->e_syscall_intern)(p);
795 #endif
796 #ifdef KTRACE
797 	if (KTRPOINT(p, KTR_EMUL))
798 		ktremul(p);
799 #endif
800 
801 #ifdef LKM
802 	lockmgr(&exec_lock, LK_RELEASE, NULL);
803 #endif
804 	p->p_flag &= ~P_INEXEC;
805 
806 	if (p->p_flag & P_STOPEXEC) {
807 		int s;
808 
809 		sigminusset(&contsigmask, &p->p_sigctx.ps_siglist);
810 		SCHED_LOCK(s);
811 		p->p_stat = SSTOP;
812 		l->l_stat = LSSTOP;
813 		p->p_nrlwps--;
814 		mi_switch(l, NULL);
815 		SCHED_ASSERT_UNLOCKED();
816 		splx(s);
817 	}
818 
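	/*
	 * Return EJUSTRETURN so the syscall return path does not clobber
	 * the registers already set up by e_setregs/es_setregs.
	 */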
819 	return (EJUSTRETURN);
820 
821  bad:
822 	p->p_flag &= ~P_INEXEC;
823 	/* free the vmspace-creation commands, and release their references */
824 	kill_vmcmds(&pack.ep_vmcmds);
825 	/* kill any opened file descriptor, if necessary */
826 	if (pack.ep_flags & EXEC_HASFD) {
827 		pack.ep_flags &= ~EXEC_HASFD;
828 		(void) fdrelease(p, pack.ep_fd);
829 	}
830 	/* close and put the exec'd file */
831 	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
832 	VOP_CLOSE(pack.ep_vp, FREAD, cred, p);
833 	vput(pack.ep_vp);
834 	PNBUF_PUT(nid.ni_cnd.cn_pnbuf);
835 	uvm_km_free_wakeup(exec_map, (vaddr_t) argp, NCARGS);
836 
837  freehdr:
838 	l->l_flag |= oldlwpflags;
839 	p->p_flag &= ~P_INEXEC;
840 #ifdef LKM
841 	lockmgr(&exec_lock, LK_RELEASE, NULL);
842 #endif
843 
844 	free(pack.ep_hdr, M_EXEC);
845 	return error;
846 
847  exec_abort:
848 	p->p_flag &= ~P_INEXEC;
849 #ifdef LKM
850 	lockmgr(&exec_lock, LK_RELEASE, NULL);
851 #endif
852 
853 	/*
854 	 * the old process doesn't exist anymore.  exit gracefully.
855 	 * get rid of the (new) address space we have created, if any, get rid
856 	 * of our namei data and vnode, and exit noting failure
857 	 */
858 	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
859 		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
860 	if (pack.ep_emul_arg)
861 		FREE(pack.ep_emul_arg, M_TEMP);
862 	PNBUF_PUT(nid.ni_cnd.cn_pnbuf);
863 	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
864 	VOP_CLOSE(pack.ep_vp, FREAD, cred, p);
865 	vput(pack.ep_vp);
866 	uvm_km_free_wakeup(exec_map, (vaddr_t) argp, NCARGS);
867 	free(pack.ep_hdr, M_EXEC);
868 	exit1(l, W_EXITCODE(error, SIGABRT));
869 
870 	/* NOTREACHED */
871 	return 0;
872 }
873 
874 
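/*
 * Generic es_copyargs routine: copy argc, the argv pointer array, a NULL
 * terminator, the envp pointer array, another NULL, and then the argument
 * and environment strings themselves from the kernel argument buffer
 * (argp) out to the new process's stack.  On entry *stackp points at the
 * slot for argc; on return it points just past the envp terminator.
 */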
875 int
876 copyargs(struct proc *p, struct exec_package *pack, struct ps_strings *arginfo,
877     char **stackp, void *argp)
878 {
879 	char	**cpp, *dp, *sp;
880 	size_t	len;
881 	void	*nullp;
882 	long	argc, envc;
883 	int	error;
884 
885 	cpp = (char **)*stackp;
886 	nullp = NULL;
887 	argc = arginfo->ps_nargvstr;
888 	envc = arginfo->ps_nenvstr;
889 	if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0)
890 		return error;
891 
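	/*
	 * dp is where the strings will be placed on the new stack, just
	 * past the pointer arrays; sp walks the strings in the kernel
	 * argument buffer.
	 */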
892 	dp = (char *) (cpp + argc + envc + 2 + pack->ep_es->es_arglen);
893 	sp = argp;
894 
895 	/* XXX don't copy them out, remap them! */
896 	arginfo->ps_argvstr = cpp; /* remember location of argv for later */
897 
898 	for (; --argc >= 0; sp += len, dp += len)
899 		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0 ||
900 		    (error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0)
901 			return error;
902 
903 	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0)
904 		return error;
905 
906 	arginfo->ps_envstr = cpp; /* remember location of envp for later */
907 
908 	for (; --envc >= 0; sp += len, dp += len)
909 		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0 ||
910 		    (error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0)
911 			return error;
912 
913 	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0)
914 		return error;
915 
916 	*stackp = (char *)cpp;
917 	return 0;
918 }
919 
920 #ifdef LKM
921 /*
922  * Find an emulation of given name in list of emulations.
923  * Needs to be called with the exec_lock held.
924  */
925 const struct emul *
926 emul_search(const char *name)
927 {
928 	struct emul_entry *it;
929 
930 	LIST_FOREACH(it, &el_head, el_list) {
931 		if (strcmp(name, it->el_emul->e_name) == 0)
932 			return it->el_emul;
933 	}
934 
935 	return NULL;
936 }
937 
938 /*
939  * Add an emulation to list, if it's not there already.
940  */
941 int
942 emul_register(const struct emul *emul, int ro_entry)
943 {
944 	struct emul_entry	*ee;
945 	int			error;
946 
947 	error = 0;
948 	lockmgr(&exec_lock, LK_SHARED, NULL);
949 
950 	if (emul_search(emul->e_name)) {
951 		error = EEXIST;
952 		goto out;
953 	}
954 
955 	MALLOC(ee, struct emul_entry *, sizeof(struct emul_entry),
956 		M_EXEC, M_WAITOK);
957 	ee->el_emul = emul;
958 	ee->ro_entry = ro_entry;
959 	LIST_INSERT_HEAD(&el_head, ee, el_list);
960 
961  out:
962 	lockmgr(&exec_lock, LK_RELEASE, NULL);
963 	return error;
964 }
965 
966 /*
967  * Remove emulation with name 'name' from list of supported emulations.
968  */
969 int
970 emul_unregister(const char *name)
971 {
972 	const struct proclist_desc *pd;
973 	struct emul_entry	*it;
974 	int			i, error;
975 	struct proc		*ptmp;
976 
977 	error = 0;
978 	lockmgr(&exec_lock, LK_SHARED, NULL);
979 
980 	LIST_FOREACH(it, &el_head, el_list) {
981 		if (strcmp(it->el_emul->e_name, name) == 0)
982 			break;
983 	}
984 
985 	if (!it) {
986 		error = ENOENT;
987 		goto out;
988 	}
989 
990 	if (it->ro_entry) {
991 		error = EBUSY;
992 		goto out;
993 	}
994 
995 	/* test if any execsw[] entry is still using this */
996 	for(i=0; i < nexecs; i++) {
997 		if (execsw[i]->es_emul == it->el_emul) {
998 			error = EBUSY;
999 			goto out;
1000 		}
1001 	}
1002 
1003 	/*
1004 	 * Test if any process is running under this emulation - since
1005 	 * emul_unregister() runs quite seldom, it's better
1006 	 * to do an expensive check here than to use any locking.
1007 	 */
1008 	proclist_lock_read();
1009 	for (pd = proclists; pd->pd_list != NULL && !error; pd++) {
1010 		LIST_FOREACH(ptmp, pd->pd_list, p_list) {
1011 			if (ptmp->p_emul == it->el_emul) {
1012 				error = EBUSY;
1013 				break;
1014 			}
1015 		}
1016 	}
1017 	proclist_unlock_read();
1018 
1019 	if (error)
1020 		goto out;
1021 
1022 
1023 	/* entry is not used, remove it */
1024 	LIST_REMOVE(it, el_list);
1025 	FREE(it, M_EXEC);
1026 
1027  out:
1028 	lockmgr(&exec_lock, LK_RELEASE, NULL);
1029 	return error;
1030 }
1031 
1032 /*
1033  * Add execsw[] entry.
1034  */
1035 int
1036 exec_add(struct execsw *esp, const char *e_name)
1037 {
1038 	struct exec_entry	*it;
1039 	int			error;
1040 
1041 	error = 0;
1042 	lockmgr(&exec_lock, LK_EXCLUSIVE, NULL);
1043 
1044 	if (!esp->es_emul) {
1045 		esp->es_emul = emul_search(e_name);
1046 		if (!esp->es_emul) {
1047 			error = ENOENT;
1048 			goto out;
1049 		}
1050 	}
1051 
1052 	LIST_FOREACH(it, &ex_head, ex_list) {
1053 		/* assume tuple (makecmds, probe_func, emulation) is unique */
1054 		if (it->es->es_check == esp->es_check
1055 		    && it->es->u.elf_probe_func == esp->u.elf_probe_func
1056 		    && it->es->es_emul == esp->es_emul) {
1057 			error = EEXIST;
1058 			goto out;
1059 		}
1060 	}
1061 
1062 	/* if we got here, the entry doesn't exist yet */
1063 	MALLOC(it, struct exec_entry *, sizeof(struct exec_entry),
1064 		M_EXEC, M_WAITOK);
1065 	it->es = esp;
1066 	LIST_INSERT_HEAD(&ex_head, it, ex_list);
1067 
1068 	/* update execsw[] */
1069 	exec_init(0);
1070 
1071  out:
1072 	lockmgr(&exec_lock, LK_RELEASE, NULL);
1073 	return error;
1074 }
1075 
1076 /*
1077  * Remove execsw[] entry.
1078  */
1079 int
1080 exec_remove(const struct execsw *esp)
1081 {
1082 	struct exec_entry	*it;
1083 	int			error;
1084 
1085 	error = 0;
1086 	lockmgr(&exec_lock, LK_EXCLUSIVE, NULL);
1087 
1088 	LIST_FOREACH(it, &ex_head, ex_list) {
1089 		/* assume tuple (makecmds, probe_func, emulation) is unique */
1090 		if (it->es->es_check == esp->es_check
1091 		    && it->es->u.elf_probe_func == esp->u.elf_probe_func
1092 		    && it->es->es_emul == esp->es_emul)
1093 			break;
1094 	}
1095 	if (!it) {
1096 		error = ENOENT;
1097 		goto out;
1098 	}
1099 
1100 	/* remove item from list and free resources */
1101 	LIST_REMOVE(it, ex_list);
1102 	FREE(it, M_EXEC);
1103 
1104 	/* update execsw[] */
1105 	exec_init(0);
1106 
1107  out:
1108 	lockmgr(&exec_lock, LK_RELEASE, NULL);
1109 	return error;
1110 }
1111 
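/*
 * Insert an execsw entry into the list being built for execsw[],
 * ordered by its es_prio: EXECSW_PRIO_FIRST entries go to the head,
 * EXECSW_PRIO_ANY entries before any EXECSW_PRIO_LAST entries, and
 * EXECSW_PRIO_LAST entries to the tail.
 */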
1112 static void
1113 link_es(struct execsw_entry **listp, const struct execsw *esp)
1114 {
1115 	struct execsw_entry *et, *e1;
1116 
1117 	MALLOC(et, struct execsw_entry *, sizeof(struct execsw_entry),
1118 			M_TEMP, M_WAITOK);
1119 	et->next = NULL;
1120 	et->es = esp;
1121 	if (*listp == NULL) {
1122 		*listp = et;
1123 		return;
1124 	}
1125 
1126 	switch(et->es->es_prio) {
1127 	case EXECSW_PRIO_FIRST:
1128 		/* put new entry as the first */
1129 		et->next = *listp;
1130 		*listp = et;
1131 		break;
1132 	case EXECSW_PRIO_ANY:
1133 		/* put new entry after all *_FIRST and *_ANY entries */
1134 		for(e1 = *listp; e1->next
1135 			&& e1->next->es->es_prio != EXECSW_PRIO_LAST;
1136 			e1 = e1->next);
1137 		et->next = e1->next;
1138 		e1->next = et;
1139 		break;
1140 	case EXECSW_PRIO_LAST:
1141 		/* put new entry as the last one */
1142 		for(e1 = *listp; e1->next; e1 = e1->next);
1143 		e1->next = et;
1144 		break;
1145 	default:
1146 #ifdef DIAGNOSTIC
1147 		panic("execw[] entry with unknown priority %d found",
1148 			et->es->es_prio);
1149 #endif
1150 		break;
1151 	}
1152 }
1153 
1154 /*
1155  * Initialize exec structures. If init_boot is true, also does necessary
1156  * one-time initialization (it's called from main() that way).
1157  * Once system is multiuser, this should be called with exec_lock held,
1158  * i.e. via exec_{add|remove}().
1159  */
1160 int
1161 exec_init(int init_boot)
1162 {
1163 	const struct execsw	**new_es, * const *old_es;
1164 	struct execsw_entry	*list, *e1;
1165 	struct exec_entry	*e2;
1166 	int			i, es_sz;
1167 
1168 	if (init_boot) {
1169 		/* do one-time initializations */
1170 		lockinit(&exec_lock, PWAIT, "execlck", 0, 0);
1171 
1172 		/* register compiled-in emulations */
1173 		for(i=0; i < nexecs_builtin; i++) {
1174 			if (execsw_builtin[i].es_emul)
1175 				emul_register(execsw_builtin[i].es_emul, 1);
1176 		}
1177 #ifdef DIAGNOSTIC
1178 		if (i == 0)
1179 			panic("no emulations found in execsw_builtin[]");
1180 #endif
1181 	}
1182 
1183 	/*
1184 	 * Build execsw[] array from builtin entries and entries added
1185 	 * at runtime.
1186 	 */
1187 	list = NULL;
1188 	for(i=0; i < nexecs_builtin; i++)
1189 		link_es(&list, &execsw_builtin[i]);
1190 
1191 	/* Add dynamically loaded entries */
1192 	es_sz = nexecs_builtin;
1193 	LIST_FOREACH(e2, &ex_head, ex_list) {
1194 		link_es(&list, e2->es);
1195 		es_sz++;
1196 	}
1197 
1198 	/*
1199 	 * Now that we have sorted all execsw entries, create new execsw[]
1200 	 * and free no longer needed memory in the process.
1201 	 */
1202 	new_es = malloc(es_sz * sizeof(struct execsw *), M_EXEC, M_WAITOK);
1203 	for(i=0; list; i++) {
1204 		new_es[i] = list->es;
1205 		e1 = list->next;
1206 		FREE(list, M_TEMP);
1207 		list = e1;
1208 	}
1209 
1210 	/*
1211 	 * New execsw[] array built, now replace old execsw[] and free
1212 	 * used memory.
1213 	 */
1214 	old_es = execsw;
1215 	execsw = new_es;
1216 	nexecs = es_sz;
1217 	if (old_es)
1218 		free((void *)old_es, M_EXEC);
1219 
1220 	/*
1221 	 * Figure out the maximum size of an exec header.
1222 	 */
1223 	exec_maxhdrsz = 0;
1224 	for (i = 0; i < nexecs; i++) {
1225 		if (execsw[i]->es_hdrsz > exec_maxhdrsz)
1226 			exec_maxhdrsz = execsw[i]->es_hdrsz;
1227 	}
1228 
1229 	return 0;
1230 }
1231 #endif
1232 
1233 #ifndef LKM
1234 /*
1235  * Simplified exec_init() for kernels without LKMs. Only initializes
1236  * exec_maxhdrsz and execsw[].
1237  */
1238 int
1239 exec_init(int init_boot)
1240 {
1241 	int i;
1242 
1243 #ifdef DIAGNOSTIC
1244 	if (!init_boot)
1245 		panic("exec_init(): called with init_boot == 0");
1246 #endif
1247 
1248 	/* do one-time initializations */
1249 	nexecs = nexecs_builtin;
1250 	execsw = malloc(nexecs*sizeof(struct execsw *), M_EXEC, M_WAITOK);
1251 
1252 	/*
1253 	 * Fill in execsw[] and figure out the maximum size of an exec header.
1254 	 */
1255 	exec_maxhdrsz = 0;
1256 	for(i=0; i < nexecs; i++) {
1257 		execsw[i] = &execsw_builtin[i];
1258 		if (execsw_builtin[i].es_hdrsz > exec_maxhdrsz)
1259 			exec_maxhdrsz = execsw_builtin[i].es_hdrsz;
1260 	}
1261 
1262 	return 0;
1263 
1264 }
1265 #endif /* !LKM */
1266