/*	$NetBSD: exec_elf.c,v 1.5 1996/02/09 13:25:54 fvdl Exp $	*/

/*
 * Copyright (c) 1994 Christos Zoulas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>

#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/exec.h>

#ifdef COMPAT_LINUX
#include <compat/linux/linux_exec.h>
#endif

#ifdef COMPAT_SVR4
#include <compat/svr4/svr4_exec.h>
#endif

int (*elf_probe_funcs[])() = {
#ifdef COMPAT_SVR4
	svr4_elf_probe,
#endif
#ifdef COMPAT_LINUX
	linux_elf_probe
#endif
};

static int elf_read_from __P((struct proc *, struct vnode *, u_long,
	caddr_t, int));
static void elf_load_psection __P((struct exec_vmcmd_set *,
	struct vnode *, Elf32_Phdr *, u_long *, u_long *, int *));

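/*
 * Truncate `a' down to a multiple of `b' (`b' must be a power of two),
 * e.g. ELF_ALIGN(0x10fff, 0x1000) == 0x10000.
 */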
#define ELF_ALIGN(a, b) ((a) & ~((b) - 1))

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
void *
elf_copyargs(pack, arginfo, stack, argp)
	struct exec_package *pack;
	struct ps_strings *arginfo;
	void *stack;
	void *argp;
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;

	stack = copyargs(pack, arginfo, stack, argp);
	if (!stack)
		return NULL;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
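	 * (the ELF auxiliary vector): AuxInfo id/value pairs giving the
	 * program header location, page size, interpreter base and entry
	 * point, terminated by an AUX_null entry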
	 */
	if ((ap = (struct elf_args *) pack->ep_emul_arg)) {
		a = ai;

		a->au_id = AUX_phdr;
		a->au_v = ap->arg_phaddr;
		a++;

		a->au_id = AUX_phent;
		a->au_v = ap->arg_phentsize;
		a++;

		a->au_id = AUX_phnum;
		a->au_v = ap->arg_phnum;
		a++;

		a->au_id = AUX_pagesz;
		a->au_v = NBPG;
		a++;

		a->au_id = AUX_base;
		a->au_v = ap->arg_interp;
		a++;

		a->au_id = AUX_flags;
		a->au_v = 0;
		a++;

		a->au_id = AUX_entry;
		a->au_v = ap->arg_entry;
		a++;

		a->au_id = AUX_null;
		a->au_v = 0;
		a++;

		free((char *) ap, M_TEMP);
		len = ELF_AUX_ENTRIES * sizeof (AuxInfo);
		if (copyout(ai, stack, len))
			return NULL;
		stack += len;
	}
	return stack;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if OK, ENOEXEC on error
 *
 * XXX machine type needs to be moved to <machine/param.h> so
 * just one comparison can be done. Unfortunately, there is both
 * em_486 and em_386, so this would not work on the i386.
 */
int
elf_check_header(eh, type)
	Elf32_Ehdr *eh;
	int type;
{

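	/* The e_ident bytes (ELF magic and friends) must match what we expect. */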
	if (bcmp(eh->e_ident, Elf32_e_ident, Elf32_e_siz) != 0)
		return ENOEXEC;

	switch (eh->e_machine) {
	/* XXX */
#ifdef i386
	case Elf32_em_386:
	case Elf32_em_486:
#endif
#ifdef sparc
	case Elf32_em_sparc:
#endif
		break;

	default:
		return ENOEXEC;
	}

	if (eh->e_type != type)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
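 *
 * The segment need not start on an alignment boundary; `diff' below is
 * p_vaddr's offset from the previous boundary, and both the mapping
 * address and the file offset are slid back by that much so that whole
 * aligned pages can be mapped.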
 */
static void
elf_load_psection(vcset, vp, ph, addr, size, prot)
	struct exec_vmcmd_set *vcset;
	struct vnode *vp;
	Elf32_Phdr *ph;
	u_long *addr;
	u_long *size;
	int *prot;
{
	u_long uaddr, msize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELF32_NO_ADDR) {
		if (ph->p_align > 1) {
			*addr = ELF_ALIGN(*addr + ph->p_align, ph->p_align);
			uaddr = ELF_ALIGN(ph->p_vaddr, ph->p_align);
		} else
			uaddr = ph->p_vaddr;
		diff = ph->p_vaddr - uaddr;
	} else {
		*addr = uaddr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_ALIGN(uaddr, ph->p_align);
		diff = uaddr - *addr;
	}

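	/* Translate the ELF segment flags into vm protection bits. */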
	*prot |= (ph->p_flags & Elf32_pf_r) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & Elf32_pf_w) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & Elf32_pf_x) ? VM_PROT_EXECUTE : 0;

	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	NEW_VMCMD(vcset, vmcmd_map_readvn, *size, *addr, vp, offset, *prot);

	/*
	 * Check if we need to extend the size of the segment
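	 * (p_memsz > p_filesz); the difference is BSS, which is mapped
	 * zero-fill below.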
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0, *prot);
		*size = msize;
	}
}

/*
 * elf_read_from():
 *
 *	Read from vnode into buffer at offset.
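 *	A short read is reported as ENOEXEC so that truncated files are
 *	rejected rather than partially loaded.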
 */
static int
elf_read_from(p, vp, off, buf, size)
	struct proc *p;
	struct vnode *vp;
	u_long off;
	caddr_t buf;
	int size;
{
	int error;
	int resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size,
			     off, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred,
			     &resid, p)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
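 *
 * On entry *last is the address at which to start placing the object's
 * loadable segments; on return it has been advanced past them.  *entry
 * receives the entry point adjusted for the load address, and
 * ap->arg_interp records the base address the object was loaded at.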
 */
int
elf_load_file(p, path, vcset, entry, ap, last)
	struct proc *p;
	char *path;
	struct exec_vmcmd_set *vcset;
	u_long *entry;
	struct elf_args	*ap;
	u_long *last;
{
	int error, i;
	struct nameidata nd;
	Elf32_Ehdr eh;
	Elf32_Phdr *ph = NULL;
	u_long phsize;
	char *bp = NULL;
	u_long addr = *last;

	bp = path;
	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0) {
		return error;
	}
	if ((error = elf_read_from(p, nd.ni_vp, 0, (caddr_t) &eh,
				    sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, Elf32_et_dyn)) != 0)
		goto bad;

	phsize = eh.e_phnum * sizeof(Elf32_Phdr);
	ph = (Elf32_Phdr *) malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff,
				    (caddr_t) ph, phsize)) != 0)
		goto bad;

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		u_long size = 0;
		int prot = 0;

		switch (ph[i].p_type) {
		case Elf32_pt_load:
			elf_load_psection(vcset, nd.ni_vp, &ph[i], &addr,
						&size, &prot);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				*entry = addr + eh.e_entry;
				ap->arg_interp = addr;
			}
			addr += size;
			break;

		case Elf32_pt_dynamic:
		case Elf32_pt_phdr:
		case Elf32_pt_note:
			break;

		default:
			break;
		}
	}

bad:
	if (ph != NULL)
		free((char *) ph, M_TEMP);

	*last = addr;
	vrele(nd.ni_vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 *
 * XXX no demand paging (yet?)
 */
int
exec_elf_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	Elf32_Ehdr *eh = epp->ep_hdr;
	Elf32_Phdr *ph, *pp;
	Elf32_Addr phdr = 0;
	int error, i, n, nload;
	char interp[MAXPATHLEN];
	u_long pos = ELF32_NO_ADDR, phsize;

	if (epp->ep_hdrvalid < sizeof(Elf32_Ehdr))
		return ENOEXEC;

	if (elf_check_header(eh, Elf32_et_exec))
		return ENOEXEC;

	/*
	 * Check whether the vnode is open for writing.  We want to
	 * demand-page out of it, and a writer could change the image
	 * under us, so refuse with ETXTBSY if it is.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0\n");
#endif
		return ETXTBSY;
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf32_Phdr);
	ph = (Elf32_Phdr *) malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
				    (caddr_t) ph, phsize)) != 0)
		goto bad;

	epp->ep_tsize = ELF32_NO_ADDR;
	epp->ep_dsize = ELF32_NO_ADDR;

	interp[0] = '\0';

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == Elf32_pt_interp) {
			if (pp->p_filesz >= sizeof(interp))
				goto bad;
			if ((error = elf_read_from(p, epp->ep_vp, pp->p_offset,
				      (caddr_t) interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable. This currently only
	 * applies to Linux and SVR4 on the i386.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>), and also
	 * set the ep_emul field in the exec package structure.
	 */
	if ((n = sizeof elf_probe_funcs / sizeof elf_probe_funcs[0])) {
		error = ENOEXEC;
		for (i = 0; i < n && error; i++)
			error = elf_probe_funcs[i](p, epp, interp, &pos);

		if (error)
			goto bad;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		u_long  addr = ELF32_NO_ADDR, size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case Elf32_pt_load:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
				&ph[i], &addr, &size, &prot);
			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr && eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case Elf32_pt_shlib:
			error = ENOEXEC;
			goto bad;

		case Elf32_pt_interp:
			/* Already did this one */
		case Elf32_pt_dynamic:
		case Elf32_pt_note:
			break;

		case Elf32_pt_phdr:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
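	 * Placing it MAXDSIZ beyond the data segment keeps it clear of
	 * anything the heap can grow into.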
	 */
	if (pos == ELF32_NO_ADDR)
		pos = round_page(epp->ep_daddr + MAXDSIZ);

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter.
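	 *
	 * The elf_args structure records the values (phdr address, phentsize,
	 * phnum, entry point) that elf_copyargs() will later push on the new
	 * process's stack as the ELF auxiliary vector.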
	 */
	if (interp[0]) {
		struct elf_args *ap;

		ap = (struct elf_args *) malloc(sizeof(struct elf_args),
						 M_TEMP, M_WAITOK);
		if ((error = elf_load_file(p, interp, &epp->ep_vmcmds,
				&epp->ep_entry, ap, &pos)) != 0) {
			free((char *) ap, M_TEMP);
			goto bad;
		}
		pos += phsize;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;
	} else
		epp->ep_entry = eh->e_entry;

	free((char *) ph, M_TEMP);
	epp->ep_vp->v_flag |= VTEXT;
	return exec_aout_setup_stack(p, epp);

bad:
	free((char *) ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}
537